var/home/core/zuul-output/
var/home/core/zuul-output/logs/
var/home/core/zuul-output/logs/kubelet.log.gz  (gzip-compressed kubelet log; binary contents not reproducible as text)
4^o?c Co?x&@ֶ=k+d9-6fт:YLYr/"b) ,9_YH+qGfRoKBw\b{jfپ/nB ]kV>sbtc5ʹlܛU-/ߛ?Ǜg 6w_lb}6kgI?x6mti].m`6%w{&wXbJ.m`G?;.m`6Kإ vi].m`YnYrIduϲaqfLT6jJ"T|KA0$UNAWB0L6BMOM:߲-;rG}K+QRN$ZʨS.FL$Ōqd η|Fc/Y9?d;RWxqL[eJ2𤓌Mɾe!22C0!iN-6ȥndUX: -%r%'=)r \h{}5q6KqL%NIF;^鎈"Mɻv-UvX[:|W!`{tlޜ!q-)>ՙI)Ew-k0r^^ja5WAAYJ+˓C&:pA']:m(\ ҁ[lәusa<XRֵ8ܒ:FG |q4Ggc[ i4$X֡!RrC޲dJ&hjx0/F K]Ba!]K 8g ;GIĥ-hY-ZgxBw&׍K$HMגizvv|Eq_.ԆaDwNI3R$Xp<+'5yL9xb*fQJ`sR;l( |IEΗ*o3Vk-1 " 'Bk4&H !)PwvBcPqߚ.!c6ֵ*˜CQ! V! W5Yf\V) KS'Qr@ǚ1M!賲exa6FXD]HGHYh0HD)B ]Cz:;\o 7p)I'n3#Agq);H\9?AZț+k#%"r&ї:Vr:K$,d_×1O]H߿*1qϋ!:UrxFU!zל@8C@3 Svp"`8pq:񃋃@ l56( ( eڢqI;M Q/C^-J4F.8\S0t;u5zzoH*\3 LcY?kE?UӬ0=_U7,}:☦tvxIIDޕ6Mo/"k/'dz^/œa􏎧}oEj ^si`MR6Io:iz o~ʹ,+;2ĘK/|4\;z|Ir[㖌[9F6JX~qYMs)/>չ<DŽ_W^Fӥ8V7H'>?^1?o~y[~~ß޾;n__~Yhb8hjĪYb-CO?etfe]ٰ0+iо?#;l1^[n@7^5^MXժV:| fU-Nb !8fD).\YcHc#]e+zm7˛yJ_\rSno@ vƾsdb`VFfJ mk S=]%)/q6:灬23zrVCǎ̑M]<44/zsJN"FNR,QI}YnɶX"T+P49?R}FZӉ+fX_ɥ7V9$ϻĶRԎo]7net춇>P4ﷺo7_S;Ӹb{Y o~N/{?JEZ|kDwnڤv]퓺{SRT-wLhlY:%-8Q11QJ7R\zXl>5{_Ӱ7fjr]6ur}O˰W@nWNj45]߹jQ4{蒾-kMǭ\hy 'ێi="`[*Zz]~9rq:˻Aj\rzKUX2Je߱9e@;#6.&`HkDd!k5Kf<0c\L!Ũ,f);sCo4`S^H3 #8؟ҞtfVޡ3%NGbyoM'J9S4 T M -'{1``rj"Ȱ|EPvl~P~! OF9Jr(/ךв&݆hi:8h =J⊓v{%ߑ̣N,b&kCakڱ)Pla|5OI=GlVbZ:Mpǧ9fUBϧ_j\t@@TU^t^6ͣ :q'X&A-UР*DSHC4XŰ<.uᤷ)'/BURPO$<.$YOw2.΀a)DGBf#tDB_ *}G .J[vn70,J(,m$A5굛ʠƒRmBF /'#*xp^?\?5̈́ǞBӞ$yiE.!zHD4^u 0Bk5m /뺼rI! 5/qTW@qh+-e|ckA ћ3-3.]c)f/m=(t~EևأTu^BESrh$o.JGkj$O@B=F1ԊǒW_dxz[Wa_'հJkw oת"u"cZ/ay'߼o9B^ֳUm[ -M\G@KEATq_- P\e+QYly9~ūhuS'ke,*YG*ДU[i1|Rz..f|oR4]N:|ȵ\fo}sq{T<>/pr."JkM14}ٍ_{sK$ pFfbt2J_BHAQuGn/ xQܒcoRbՇh(2$FĿ\N׺ mAZJrVScx2c?.Ӭ#衻]Kaid[WK>4.jFʠ-"@#|_uʖ:]bܮi'qtTMOh-"(Tף˵qI[e`fh:0 r3ƥֻcl6{L/oZ mx`yYv/nwİMOx[̚e|f:[:Ѷ$vʞ/f&JYG=oGnnǥˮmz&hJqʀQ'RD uZ#AtƳ0\HʢƸh \As-Q (#O@D Ԉ@@$Db ڥ脥 t|D2ĤzWOQ/-6N=zY\xsaJ9'} JZRr`GG)CK5 > "c\௼7u$`<,Ei2cø\ڃ?>a jf`ˑ܇>/Mo0eJ`s*E `f!<)&ep[J͚ė󑯠ĔM & 00#L hl(h-vh3ZQO{Y}4pQjg\'K?qΧOb7Pe1 石4>] 8?oOq;OmxrQs& Gj(=*Zg;`}Ͼk+~PCV }$YM6]m {3w;) "|Jh6Wʊ*c5X rQIETn@;ώfpXT4h=bC{"1:h YoNEdMP kM'|b"&ygLDFP)U!2+ Պ}\!̶z%!;PfyU#\Ҽ3-ePp~IŠ`':7 :$f LO(&kU-g97=8˂#hp: yLpbNtyBEF#'/:qcA^(*2wlS΋HhE/ύif&hj`a:p )cR9M<.TƝ `$>GIz-"-z{%M6޴2mଠ+goP >d>)^pѤnzl|=[ cphݭ݂Zo&jZFW֭T/ s0Hc69SM$T᳼("hai) <1 y`NEcׁ:颫t 8F %&154rg2x`ٶV<g=guWog G׷|f~^M*$A9Iks T:B;u-tLŇ,Wm)xpkGr/ny@B-jfXܢfv ;[Tz5<5sUoQQ$'zZJ]ʕ*r*siWWoP]IɈd'ecWW]EOE]I%^]Au2]v2>2B\[TWhY 'aIO' O*s> siO2lAW[cWWK{ujs;4?_ig#TC>,/K|X3|}>}h][u 1-7~嫯no&~ AXqUduI 98=ﰻ,'/o>a!  =Cnn~S-߀{Ty)U2w{g%osL?{6dUZT!9B~𳇟G ?%E[lD &bq]w>XM$!B( ?0ǃ@$j9.DxP2#h"="g1v"8uW+sO7-_~wFh5ԴY(6nFNwH#.yEP\BRYJNov1X#36{W`oY#Tc)*:᭶&H]Ҟ-U1%(T[X[7W 9` NVzq[6EԮۮ-,%IކƠ: jR5H# )%iᕍVZ+HPD^7(ߦAű\A(^6jD1 GQhf P Q KԴwcsæ2˫)b"E!\F}G/@k"!k%FT#((-Gp($q*N)}aA-nA aiVx|v2TFYJJRNTrI]z䑗(WE6N?VDMYpչڷ9gM$-q<^ P )hQ(J!$H.-S'z̩ch}X[J؄Jf,nXӅV#u!'euaKׅ҅+32>Rp"+ۙooM6~Ӡrv56#I_Ƴ>4m{ۘa= )MeRk7T$%%%J.ԬdQ_dY&H1BxTO *:":J,T`gFCq4+և;_|VӰOnZ$z{;48KZs) 1S4 iH0b4"шgf5B/7l+^1b'6 Nȭ\DMUqLa |t{U1S~hdcOLIY,-!0l\t|نY17y'C OS~y9$6jVhLӢ`ͤˎE>5}i\FA2 Y&X"4#Hb`g8^QJ:01fLuLAH^IR ^Uh!!Mz(m Bôd&C'n%} n6>cI=x=ihXʱ i_tH$!>L,PD?oh,*¤ڛB! ̻(AnJclGUq~ΐ3(8_GQk9J ,1FLSʉ*)c,[LsCYL/ݭiL ֝`yѸxr]Xxq*ک̔>U3% MۊFǃϡ:HiJҚQ y yzʟN$j8A [I$[oǼמx$[sEظl.'uTAuoz=eg[zlWq[vm%A:\gQg{* \(l'&Aq@8v>[q8zğ6wO_aVWG+;XVIΣ lk$Up׆*PRBk~r$x䝲XI,ڠ)lˬ (Wze A2 ǨU1kQRk KGϝΞ"ȟeO u(bǃ[d:+0"a:8RL݁Nù]LgHͳ(Pƥm ޣ`59$UX3ꔸ㷟K??8~:Lxu+0 6y]UYCPz(=8.]A]Nձ|'Pg` n~f^N!ѹ0 rg2;]\]CmTm9g,2s~_ޢ_w+C<d ΀G fO']cDf gexPQC{RdAoǨ#GM` B=RI-q -TɃi"i6dި{ӑV>y!f#sl4r*֓$D!x*%^E6eGE6#5iASkquk [@Pߪ})~hNKX⡛.U4̊^W &uDhQhcpr= 7V95J0Wg̠] `G a+-߽wRD~z{3}mG!h9|h+սH념h8;;B;](nq]/410 .f`rH8TXpkZ&ĵ r B($W1pH*1gz4)!VFcؾ,l: $i{<{ȋe[ "?"FhSQ:5O~TnAڧZ#wfI9t0:sBDP{h{7a.`8'^LƗIـ|~Q;ֿ ceӧr?\ +WX\! [ýg볁i6 rQM%!UH̒$0aD` EV? 
A@W )[X1c=6MVHKDf?lo!X9\W>|u 3zsٓ:}kVoApCGUb#cE3l4.=ŬienqkkVנL+.L6V0 d/Iu0+\;5I62u{MՑZK-d<^C}L[<ow \CSYqV^f;N鷷-kgL{ivH˖[2GKLlݴPli/3w,RXL9Hi2 l]c=ު( n%]~܅βgMsª /u޻ٙ/Λ;{L,c05%a`7+&9NxQ>߯I^9;9Tk B& EH^4IQ'Ș\VƙJk,`8BQl m \2co=3Zgjb$.:^Gfӆ&wm;Ca63 p|Ȓ"IolK,- jSbUb \ 6h6rcx\I /@Fg}$dDC,>cclڦBZϝK7cͮg(  UizL2i*jeԚy}4NucC%InXv2!:cJLYd\J42ieZ";;ggx{yHG&RYG** %"$% *GI02jOvl` PdC01oxfr:lZd B=xivˎcˎĎaOƎn+[}} G)K0E(9<W)΄%G-9:,9ґG\|3+l5hy*A6пYn#w G/nEqT|'veޛ/ӌoX5;zgD0gA*)PHQxo(X>'$j'o#w^A3Mm4|Uae෭}s ƀ QgT1`5 {M8 N%WY$Q!XY- 'dk缌,] W>\f?.2rˉE;tkҖRRiN) {:"R#OUa)2鰾tXh9G{BpE &OB \iQ;\)j;+m5WNKZ%/¸xYڭ7gW{Hm`Ia&޴wG?.zFwJћNY/z _>/̦Ʃ+u_2{̱7W_) PgUul{) ȲX^bn-a6Wm*35EQYMM.`MI}қ<8ʮkC{- <1"+6;{)1 E\O1@rΎ1P6 \>*Zu*pE>y͔Hu W/`Wg+yxv Co؟R:.|m6vf!ʘp·d"o ^G?p!A[lɸ%O-A*.-Qغ%Cq$mZ쏧izŞA m LgX)xa_M`Ygͳ~yY)X%{̣ Z_d0/3YmִFk#F+!tu _HbDnH9ʒ!~r$ZdFM>Z@12͛ t႖YΨ=; } S"%1^sru0{Re~Zcc$^)KDI]"Ɖ39)'3L c}X&"p<~>} Z5BXnss?_TL s,"' e ET*Z9{i)!1 4 KZ*YΕ3' Bԁ$4r`ܘ8<`0k,țk';ZqSe6EmQn+Z@vRC6QڑJ)d+4QTV$U9 ۑOgeƎQ''oܧMc$ zcOnunÝGHM(:OsIQz(}fFH&.g5@&VӲ%>o7LgKxGÛgE.ٮXAuzvvf&겸_.= 5vi+Z|d%c$&Xp"+'5E􎛜s2ĬU>磔Jb9$/лh;:7A JBk 96Bץ GX͹M@PG!ccPniNѠ;=YR,s*ޢ dn^.9-3.k+)Cf'`,V%Lϵ4m3=Nf'Һ94ܪ \Ua4z}K_ϝI T=.wU{披xD>IgM9!Iy.|,70~xS|0^w̯Ծ:W-?g w5.Qx 9g8Zpgn+{^('bt~,Y]c ] $Qqr7o^O.Wȅf3 e,lu59p]OWV\F7TjzXfkw?W~/t529[!A}sb:`ovQDjRx{غ\-]kFoƨ,+;15FQ,+߻ewsKnmnuuB+n:.i>GJÒoQ$jL;#`rCCĿ;; u;ϿRǯ? so:Pj֬뗽=xOswJu뷃a)Sn -aG؉Ph/'N=b=^Mhijo4ܦidgE(vv]Eޢpk1!v#u5%V#lclOJAx\B`JĞ/)܋@Jitt <Dpyn 8ѥϷc:u5W[R-)`IjV>z0ضL@Lˢͧդ~\W~wV>i\}}!`Y9ge=ʥ*h*$T" oYy?u<Ǐw.L@80kApYH`ϙ1.bB_ Du\Ɠ )E| {hu#'tq?P ;gKF>$zeI}_)'2fYqP Ld Eb0LF|0ZI&\5dXQP"he6@K^ɉCBOD d!W^F53ژ8OaT%ZF'qTޘaJ?];kp Z^9I''uLGή=<U{V\?Ӎ_SJjɄNldR!Z4!\HQdTFU \`H8"Mt+pKt@idlL96;b!fXO J&W^-b|8r _z#6$9%"gLUJEp;3YD욍 i`Q` Bxmԙl;flhZ 'XЦ锫M0bA?.G`Ҏ]Q7ڼEn sv2dzdJ\ td* LL:`D.f3e Hd!3HVt kbbA$LPDS9œYFȨN5A1qڨ/'Fݘ 0 mJ?vED0"[D'a՝: -KlZF`R")ZL2f%FD ȪE.8JkH&jpc>4;ܘ8#○D.:&_gcZ+.Bø-.IyDpp&&7>Z--i-َf9b&kŧcƴcW<Ćw'E_'^#Juড়4}8~'6g~Ŷh%uø}孧_~y>.x6[+ &AZ,`U3 )gL$͔D jaEgjC BKB"YIZf-*[ r)Jch *Ĉ:ey,FFn $MNJ:As MooL?&G51t$ǭ~Ztǔپ^"ZЩ5S<:C)AJed2V4wmmXδEUydR'2Mj+Ie{/ݤ/$(6~--98B@eO Qi@(yj4~TQTC'-o9D2 !H*o7vn Uѷ׹[TuO7J1o5WD@ThiU!T8ba{1VxC2M24JP( poѦcD!E1z_=P ;␺э b8 ",[gYLf_J_JcoJʩۀjw_X?[(7.v4['~W~3rrl~||׋jU4&e=gq|W (nzŨ/0R%*;=:` Z}RxivOA;>=閑ys+3#b8KSХ'S_wF~ƍ۝N35߽Ikwb\Gd]li|Qg<)y <({ 6@{iím?s.`:{`]?xfHu2jOq\3[QKG^}A_l=liDz;ەl}1Dj.Nx\:I8jbDiiPLMDN)p8}3!]%py qz %h* xTAOxRiI-7/dO{o'待 Я-Ϻ [>Nƛ]3 !\v-CdMew4j?CvroZnm~!ۜ#ٹl,jF#z*(5V!Thi+:Slsgyz˙4tޛs>.:[\݀]D]*r.oS"Ffc+֘0wDDa%҈@ͱJs@YJaVKM!ۙ$sqTJkShB&`DȄ)$׻bu97FXW A^:Y u!ujDK?.=p{FbɀxJO HI*l {~$g^!iFP%4tP[H40P#Lp3E *^~wӥB^6^wU^h}a_Ĕ 'Ϡ+0Tb}2*2QV&P+%$@jE ZWEHE 9U+8ͅZ)Eb  a!Έl*40އ\aHWgDW$pa6UDQ*$]]#]"$C|u%]EzvQB9ҕҜή U Y.tbw(@Wi9P̞j;]u,\/|v -n(]tE:v!eь*f<p΅Z h*Dx++D4)]EdCWbUD@WWHWBpj|8G!T!?|qb2e\rW{SkxL(X]0aAT+߯F2)~7rxf&YO}QoO5vA%v&AW[~ti.N]t&Jӥ|^1_7&Qeݹh',D+U ?ߏ>~M؋7&Tz;jl7o2{l<63leqjhm_a5s'f~rL7٪X͓m^d,'=Y}Db/o{_د/mv$\m|Z=rHiшߏo~x;7ŬC.Wx6+Z݇3*0h8[NǞ]bǐ"۴h? Tcfp>yT0<[ SrgsR~Qo9 td~?k5#\zP`$ ʕR*o ";ޱd? 
OJdm^tq~N% 4K ݟ_l49%s&UqmҦzi *ܦĎ6Fv9[V\m])Į(v0򂨤ۣ.BOsrs$2bj #6ZK`s Rk=mu}7}֩-6\9 缶<^[j&Rɪ{*B幮-<~L_[KmtWpsyN0"DF,SVoySg9š(FSM>Փ/+5ݨq'l*qTy4.>:' X ]EBWB0p,ܧUD JX`<#Y>`˳V>Qt@ym9HԼ]u ѥٺ%t -PMn(Yϼh];PR@Ftc ]\*tQ 1\r]!)U{2BNW%F]]!]%$ F7dCW: %k+ 8# %U i.te#@WCW N ]dCW-}<ϜT 1UK9jh;]E|F0+9] `UI6U@KaJF@WWHW2&U p$ h }^ ]Sn^ `𥵫npمNh!1 %Q;@Wlc"&͈  l*e(h/JFB]u`6tU•UD+h*l++1B9iWGpe6U@!;]RhB")2R ]E<h%;] (w՛<zȧH~~a-AtHcɑN >$A$stAMmN"ˆ+y (/| .乘-FDɇk42.yFtS&W\*q$Ba߻ w0{1"EY_EER$5g4k~jjM QJ]@Rlt|BU[|ъUFMGW/`Uۓ͆p)2ZR^ ]!iu᭡++(i ]e42.vJ8$y`f;.祫3ڏBMbWR]:D-+2s}Z)NWeGW/F j ]eVѦUFՋ+5t h ]e5la]= ]qŌnug0Õ-tўlP]@\y)7G?|[7v2~HNܓPEtPJq!ZġXqpek<Ԍs(Dv9Tj`M+05t>)-\7B)hP{t4SM+ eR2Z?4NJwtrJk%2`g0խIC fD2Hhu+pMkO(Eg]=N;vqչ\(\R4 }]鎮Nzb4iG2\BWM+DttZDWppek*=WPR;ztŁi-ZDWHp:-ʚNWeIJҒ5t hn:]eIWږ#AM?ᨎI > (Ҽ#l,zaul4&Yoxt$,cf,7f}VtڎpĦyF8?ZB;ˆ+kbOp(df0Fi#q\ i-}bgj%7~-z3ꓛA~?pZ3\ g- O'=8nr??o%gkt]E_V [8TQOT fg>b|X_FzC\,nĄY2< YЄۀCqmN=S1VsSrv M7Von0.UbdmIOPUnD]5:ʮfǥ;P*C"d3%)=$jB q Ԑvb=Cch^hID$&ڨگX#49f%֜8b_%D,CRx(/\H$h4R|>Έh`āek ^8E]q-)nP15ۂ9pX.S'勇ޯFma%.ʮb|t&yLLqB]&W7nk>^k]4xI+)<"t:3?;$<]I#[lD3М7Ht),3`5HC>x:L{N ZF 9naDrff684ugb|}9K]ufEy>fvhںMWx6Qp+ *2FSrx9I!Qc9"R~%hXd­FxI.:EKTvBP<[gxv=3X_k~uqOQy{γfWQDKm~]On8qnwJ֊v DOnk|01x3aD UJ 8hH:$eO$VS.5|զԤVpZ`Á¯,YM> bvJ_l[ZӅ ޱd1BIc܋0(1FHI(N 6ay-7$CG /Ph(kLD!4,%k=TdIQ0+3v#}V%gVVPCQHIcF)XQv\(pQKor =eJAh)c$W8q6;v"0 ~.MKW=-EΎ Ϗ/TS],5*čG#%iEA,8KuJ;CR2$uABy#lsI^Jߊ@۞Ae4xddJ @\B^%K hE-n(Aܮqݏ5C!al2c41݉4ʠ\ENH N"&CMDR cv;niF aDx̗pov8 ;p%IbYn"VD(j\Ɠ$= ![3l$#DND[:]ÄعP Fn~wϹ%DJ: О4@NeccūKSG$HSXFʍf\Zux}mr=mO _X2^GLf$''T2SQBKrr$b'x'+'q`ٿZh9rVBEZ-\G#QkD鸞\j8nqBcm%!zqXR*?g#cmoojKxۻ %קWU2E% ؉ߩvz246Tk<+ˁ1M[̈u6O:~v!Fs#~Ӡv-]0! ΂|x;7h퉋=]uڻѲEfy1ᨖQ8M̯ v6ѓ>M[2 zmݍu:l>}~We)=Qs/]`ϯ_*'Ѽ?l|/~o>~ow`8iUsW C/wWP3',_ +|}4 0 )џ<1>-_2!|iu ҵ,9/%V!1~@P@7'[7Zß\Q!`#$s%V:sl)*Xb.9NQzjLTG5RViByr* )r% n9lL";G-T'עh}>rs~M[VAg5a:S+_p(oφ8}?9R+YJ84\D*pFVƆTeVSp5.[W62KYHo㣳-I*cW+"uւ֔z]&_RKQ3;c$:@'-*Ji {k:1wZ]]3zjbo0g@ݜa Okr-'Ճm雵pt3lA4uvlw?< i\Y˽WYx`0.\=osZkͺ5mii)*XcY..9NTΞT!cPVJIG1يS+kT,hP֟3ı7lgzJ!JT[|4I 2 ՠEk"!k%G!(2 DT"iͿh>\P1q*N)}ZI_qkǷ]<|\ëm}r$]\͹'ꜽtk_5p:Wd-_?.JJ9++s 9+l2 JJkA³0aL+AjBH-,:$ EOS!ǂAĭAR RmB!UZ2#gd,Ub,RV[0~uqKacvoTv0 .!J;eR6*(!2%:K$Q&) <ٲ!!q8ǀe RyɝvDĸȄrx Kbln4 * RܱԦ6v`7RKf T1I7 Ds`<׆*B(ͪsPRYﴠ5dD!*5!gi/8B IQ9ajex48+Ya:Ič~i!2<S"ʆDО1!Ci@G NEqnʠ8KYθQFID=jҌ+rϜ/;\0P+t IemILuGu!館N.>\<yX;`hBG? a~FxɝC= Siw7׽oALy_jA;`PFwI#|_c?k_7{ ^ۮϏӓrcvt=z9}=S_o<6^%A r)} Jr NuETV"x"9w:P 9X.C^$Ƹ#͏ret3 U.鍭Uzq:-/ْ6fBlˠϮ#vw|慖a<mxVwcޅwc[ڞӗoyqv]=uewYs6Y]Wy^nV//0A,֙dJiXlmf6\@WWR VU[X(1 vcꏖ*%,oJmgXcgEC^ءà#MQҢGhQ%6o;Eo/DVZR.6bS)8K(vIYo;@ 5Lg Ǟ? Z=wCR.wß0wGc%2 rP џ޹**$TF ^y0nEgyՍ^PnOo9w/^ᳮ`W*]<-GiHuxDHc2VM7VZ8ΓTg<+͞"UHy'rX"eNEaZCz8FA`RskhgtJTAsC`VBZ9 vg3i;f}}i~dKK yXtQ'| cƘgd7 orygcGjAc7#\bfǥ7'2ս6@,G.?p^Qt7y*r\C\N\߯,=Hŷ5֖̥TwG̊#FI%{3fKZ:dն+e#nBi뻠T +htyA5o%'%.@q…`?^Z]vY ~qniKۘW}lS[aLVb&skFӳg+1kN=QlyDs(@ArGY>}xa?¼Vxj^Ie1upg]:66gge<X&tTjVp E@R/ER}էĿN._3|CLoYq 6Nߦ!~˭dROI B"X,H~ Å0Zl{S98H0NQBj=a e `9c 'E~_Q6T0A JO\RsaKwmqWzvkqda?`\g# QZ>ߝEhϴV13lQd(deH lvW4-6=7rH,zuP̓&Rw [JGR̜1'Z^xD,} BɌahƜBFܜBёF͌g{ La z#M="})3( 'M#:+:1T`&B )60ИUK3b@7w&)ᑒ9(3 | hm{lRLYأAֹ"+XRs V- '16eϛG!XUF3zielsf b;FL!f8c[|o:<Ö-Onadk;ŖᩤT0Ǒ֑~A_; d6SA*ǘyK&VCJعutO!QTpuгt, oo'2=drf0L,,Ǟ5x0f5D k ]5v:'"˰(g٥W;3fuDˊ? 
*c`S,Ŋb [ **(:;-1y9|hB?+Ďq0HPTHVDO)XIiۘ ȆZ^PѮ!]Det((*qt2kX\bNqKmrCZc 5 #gP-nqOZ V;dWl1wB!ՠPwjG`(c)(| n CHN2eD&T4W&Oa D-W8Ԧ[ku z,,x$l:M;&q b jqLA, )QJ@42~T6Md& /-H2:@M=5@Q"Ł.0͑`B(DڴgDwF"=d̟H_(_ ()UyBE*Ѥ#MUDI)bh=9>+2̫#lF7KJ"5vT"2%w[-ȲޅnieFjE}lLcBEДBuf 7 {t~Ŋ4dY1<4T(hH;L'kQ2_0v8`;ګw'*Zˍ>Yv_w߸,LG1>]spq08CoБ]V%Kjk%.Ue lâc:JrB<#/,J芸P^ 4I6iy],C=h d|,EѶ< =K*`KDUW Y-'[ߤx$Df6q࣯EU7USYߙ%72b rG.$ a-&+|>y'0cͦYL*f,BȈ !A]Xjm>B^lZ@mDebj W5M pOk ) 1EW"HÙPȢ td!nd`mBgS酬iFJkh@2~ȃ!:8vGyPQ`QÍWpiSR"찈JfcDScXgo_ X++,s)&j$,Y+"d6RꝦezfjF?`!-3!mZHn|Q}AOqd fdc, zl87i6ۛsT&=&Pr}ҍ';_yhb|9"l`_[= ~v12,Fn-^kIٺ bfh^PAYʿ*wG3n*3xXo:)嚼!SA)aɵ\'69R|,6I1ơ1XC|?y!K}ؒsP' 7B0#oțbCw F-T١pNʢ#:QGI101xϦkP` tAq1յNlF54<[ Wjɨ6zPyXgoӒՏh 9*o"T[!P?}--kPiRv"oܙAwU|dͨA` E̛NMp 0h|Gi^3 _̂B\Y M F:`{GvsGl$>Muz̓Ҕ"&b9ڐYOrfm ߄C|9N](I[l$dipMlbRn ]wS`C L0H5?_w fS?Cun0=&?QR-8u@]@>[floQ~.D а+9&q8@ tN mEuH@R': N uH@R': N uH@R': N uH@R': N uH@R': N uH@R': '(8&Z (f'Pp@ț>@)gc N uH@R': N uH@R': N uH@R': N uH@R': N uH@R': N uH@^'jH:N a'P{'PRQ@W&;R': N uH@R': N uH@R': N uH@R': N uH@R': N uH@R': N u]"_Y Б8m'rw'laNktAsجN uH@R': N uH@R': N uH@R': N uH@R': N uH@R': N uH@R'8po?QSip>n߼;QJ%vi%*vj\zBtDߘeJR\m{+Aŵk+O&Jtٮc >BW6잮+"q$/DWk-CW7UJ@QS+d4KWrR]G] ڰeJWWHWmX Vh~u%hNW2aHW/3xW+A/~~7M/~{ xmɧb'gN.\=ypȥσiVaM?th)#\ r: cfጽK<ˁsj=_ g=k8-:g7G y0,5k8EXĖe]ī؂B2k}%vHJ^J ] lNW@V70bBy+v÷*t%h70άMJWOCW)[y!\:o:J(.S)]]]L?@tOqKVt%p2J4Pc~zpRtu`W4֦:s!i(ʞ@WV{[peJ^8~vOWX zϥ+v/CW*t%hJPfUWHWd=չt%[n\֮NCU]]#]IѮp Е~dJPRTB Y Us ] \UJFw))]]!]ɅLivf2V+AwvoWJ6?Ur&uvUo;]tut֓3eJள.h/uI(JW? ]G{a\tu: n𹫓^}Q혨;.]puJ ]YӕcJWOCW·`y!.CW7UJЦw{JIʓa6 ] \2Е it%(vAIv G+Ky?Xtio )]= ]1zuo\ְWyvykvŠ{PzB8)N;V|A8m8>cP' Jl)Y׮Rb ڸVAuEKl +m`5nn AKo"JWCW] Z8: ] \[Ave÷HWN]hUeԕͻoJg45US\8/tVeff&A+Е&8.vu`KVW fs: 3;SWJW;6p+ѕy*t%h9흮2?Mzʅ/DW--CW׻U h^] ʽ=ءt$t壍;E$d;] \V+At%(vGՓ>u+|ibPeJ ;n 7ީ`}HOA۷(ebmMe ɽl3YÎ,GCOO f1 ܸ m4'YAiuI6$y N +p2@;] J%5UL>]pu4֬BW힮 kJ eٻFn$ -!vp}ٝ `ű2d8Wl=,zn[H3fWɪȪ®z5`XCҵ;nDs坓59ac^*W-FܪGWKi[sf̕:p\'f?awlga#G=[yԡqyjJSO쭀˴ M_Aor3 IKY3L/׽.]쎆iҽG6T}~-UE\_/JTވTE㤎:[xxusr'{V7p|>y{;ڽхCML?Φgcܣ 3FȘwҳ8ܡQZdqYBDdzH)8Z!$1NE?ɸL1g\j~ qZ x]j @7o51T_:M$Imłwl4ЋBy}媗_-fmmv+'9Tl Koߕ3ʷ,xI"1LnU6j? 
k[d0 Ӷ&5m7o߰c$ct՟;TVEʂQ_\-n/(7[Vg~wdˏWk?6O1GM¸1&Z*o+A>KJqܫ.XǴJ9BJ`*(KPVT^*8 S,kZY%W7\YtdaU*r'Ahss>qG#dB<$bN'̙ &#:3+=2;E.ʼ1OI&3%`Xa )zGHt|~1{=_]wJe֒Nwz9Np|݋ҟήݞkJraTO'gi3dR4W$;YE(C{2FZDW72kAބ+4,n~ن{qƍ#l8_jjuzi.Ho-ڭ8׍ZZ%}nYVCT!Bt~361Ap1~Խ9[ ɑBlfB* 4Db:MȎraϽUx)j=gBhڟRq/tdȓ'}=|̟^мuGPopڹEzX\WygB)wu\%Oܞ ~?W+哛«I+ݻ*@ڴ>VZPARULW.T@rEPcΣ#'ydH-ONHXDpl"fePp H$b$5 FYPtG*A}2%*fGR E C91HoomGY5?A>/ʸ'"h}i(8N_.scPp+'v5ڷ8H14:Zƫ8¬$Z9faH*Ù/:GG62eoFpO{:uI0+5(>(ulq41%e7x*_QNxBi) 2#\+LQQ4 cg?8"8~ hpЍqL@8(Z)f_4O0w{7AKpIi`Q0$BZ VvQrrZmDy84NCwد6ٳrpP욗߶͑/(0lԫ",;a:wQKo(?bẀ6?y[搯2OٖTlt-uAsq8k&K8Haϫ`vÈ-tV9|*t҇cۜ8 `yZFݝ͟.p 0S~vÂOS:SjW੬I:ik;Gލ>6`uW 9 fVsioiҘUv[gJ7->K _ "Z Jlx~+߷s)m=x)A*2ap>]frQi_}L&3*\|a:1TJQO*!t\,SE988kĽĵ=hF-Yif_?7Uw B#A=nn~O*kT0\*U'wGG[߱";ֻ[ f$'&2"#g RP bDPȐr&zi4"DCŕ1R +\dVN:}sk}#Byß7_"nn _ݦ J1JpG u^:?Qu8JWc_o6^K(2JX,!dTd.A N"w\`moaG7Ԏn VTu2Oבff5!;L6c@&T tj.`!&d4 | .5!Ǜ-+wuYvօ9 #5g+IF"*i\p\@_4qhuPћJ8'+T@T,Of ׺1 *$E*$lYjkHgH:!ʜ2`Uh\Mjͅ:D&RgMV(c.YX#L榓cg?JkNpm.([Մ$stm ū[#UڼHG8!8G8U,ڧȼ" 28+U̒|5|]Lu)G619qH^cF=N@G5ǤBΝaXžAU QxLd3d1DL"$h,;`q*1&J VPJKd37&X3NܩokJxZ_^)b!ZWؒ,xٰ5΁zVoo@!t%t *d;h^--k5&ǜJ)9<7#KH\̴jUI~+?7#H+T*mmdIA6$&[Ԋ)h zӿ>wLɬUT20s,3)2c*I0xUTGJRj G#cӫMe^D&jAM 6:d_ދ$TgEIx?>dRC1)9P$d"s+=Ĭ!j[;"R&zr*ט ڐFBBWk|l(r1w'^:W}NIzŕ(Hg9]G 11Sj!^3su* @J&Ҳ-jड़RyJEQĎ[dհl+Pq 0EdJA)nu8z xl&?S4&À"(Jظmw)RCR䦛Sr5LHϝdquz(TD>x:;|a*\eK/Ẹ0HY=նa"HtU hh6Іc+` Ó(9ƘES=lܔƣyl11kY3TvPV}rgNn?T e=q't%v3,G}`,|P']F nn{ rY*&#LH2E4'!wp,5:Ǟw5p3`?߼{?zu;yu/O޽]0RFe3ʚFMA/ʿn'kY> ԕ^ :|'`ϳLmbĨ'p}]SUߤkc]GxC^/Mu>Cb5A`]m:{=4.u{urJ}3+JX5)( dK@BJ%N0BH7HI#NQ laܰa a=XZ8'M#ki=<$IQNQ!N';[3Ar/[trfZ W|0Od4.I2{ }2GO/Tp#U&jRZ\bxg4VuAq77E xq[%/kƦ^ dHm7?򧧈cL7i;BL7z6~FWI &uD%ԖhQjcI@T va$I϶T+gHӷVq),['YsȎ5*C̦o: lp6HF7g)Oc.*5X^gO*Lﺡa*)ES/׳P67}Rl(R'訸: U1zIP!KEqm䌂md٧oo՗jpU-QߓRw1= JԺ;3ms_se6ȁ{w)G1X#*l~7 B$Z!HY1F- є 3UiI<#L;)猖@EKb{%DKLP!I,`(,^yie Gb%ɴ4k)L !x0k5f,`ZFLǀ.FSҟHi>:>S茜G ۔1ߖ^ʞuB  ŇT\5(8.4ctw quJ Nc B $G,`$9N`.0 DAcXXHjg<"`0's4 S^3ުѝJ7׈Rqg @wBrQ6. 
lzڋ\2wʏ# *ƘSk 7QyBPT[a/g 1I;NQ(" lG @Op >jK ^Ȍ ^`A Ɯ 52vF؝]8cE,[,h)Eilqdj s]"ڵ!O~S4߫G?8bӀ0J-4A:۠Bxj5 G#Q*gtF !){D%$ؤB:`j/#vHHL8T]G*?bnwڸc;I<1QPT9XӒrυc:t3̹JnF.qVq@C3Т  D9cH>r(AvFxtj!ȱ]ǪH:FDC"nB;J#)'AOT%w+!j3 taCV%#1)DXҠM0I뺾9#YOm]\dKVE1..p>ނFK -bBd"w[G-⼏*Jl7ۂqǪx:Cn¦۪Q8{"n1s(;#HtdpYsfϼX7/߁#Fm%TZ_:(wDOvN?i(u)ֹ2Y?^0`3 ;WS/)%w!%EZ.RA,Z?6#.%V)oJֹwd}{vzV/FzGɶ`'\c-mV}.v\@tFR8cZ(pJ QaT$ ҎSla[60' P[ (1 D#JS`E*L<>SDcDpC`Y,-Y!0l\;#a`ۄxY)]uY$[Zq3i/J˶{U].OuYNx6H(A(x)`e`p2Uw 3гC y^Ej9IdsRi%A!`ӭ,`InC@3!Kp< O׍'?d9a)8v Ĉ#(]d#l?1CfDS٭V&^ۢEMbɚ}хzPԵf׳?Ȏ.@<-&K0.zGB@y!X8 b@>pO`|hRI`l  +QbO1L#éMѮi -?hʱ işB S ?)$IFM2r\| W4FDG+d joJ4*0E͸?6 H?4!'zeS*4c-0GiS~ |lĔrJ%8)STyԭԲV_ԪhpqT[!ps KU$ II7 W9=׍1ӶofÔ\J{u$gf4u͒'`|Y s38mA(h#N3FpK;N(zi֧=O qJ6pMېM9jM Cv۟fr\nL{&Al$ݼ=\[jU@7ln.ۥXm+n jk=\:6uobMTs/)}s}OGe07d֐1J'"l#Fڙl,tJuHbJ*r=:h&)7TE V;"rINkqtb'x>9LupϖǻexXs1Q0ΥS6 LE)NV0>D9Xu@/qV%7{n­Ǫg|).'߽"3)Vw[;cy7U(%1WK~23ƕ18Y2hB%1 X'Oۓe%Z9, sHKν$Fx3UH1+ HSvw;oH_G`|ճIGx+vV;eP{)l'rla};#i :.xv`H]qRR"{KwvSO_oIwՊ 4][ߋwa+ spav4j\ZG酽!la){rޙha*zC8 #q˧4eo֣R; m-a"S&ʲϞ } ߬4*ދVo9Д!7XQ#K)Y r/鸦 6׬+BaT~xnBo|)R+R[2Ee2#hr-6|#6y0vT4=jԗ[Ƣc\?{ȍMmd:x{2,r)'T?`MoDƓ>P-4r@)N3GN-Ln|2̭gYf?SzO;b͝R|(*ny8f-|>I1 C-Z|Ր1&&!RWh"u`hDh2,"Jl*(*:tD9e LpQD<*~Ja((^g͍Mmz2*NL/!J D9O-Td>| }vӡg "+AӁ GB0p xIrYhYT" /$h4e>'.ɫV*,De%,,xbf'7ZKfI7aoJ,s@jR@@*Fjk9gR7Tfm0Tu!βQ1:z>XD"CDN hn5ƌVB6YWu25iij}}~?޸ 2b Ѵ͢5V 1.)ꍲ9 w;5`rAsG1BAZ FSpMŘb́Vj<PDOw:2EB*RJ@jPO:#k]U1%vv\9C9%QHV &g X#TɚwWk#g͊1C"#+^޼_r%aU_U҇˲売+^2*~A٦kw*teeD.%Wq+nmB^)_>eW]>׏Oç7htz+gQs;}ՍBhyv7@yG i5ω >؞Y83lXbǏ.o|Jegv3k젪H;d}rkl}F)}Dv`=cZ~v34v_] p-W(߸Z-ְ@Zowcy[~cmov4Ci *| 3uGWZ~M'`s*E 0)!$29a"lr1^gա>^I٩ò 6=y\n”E?ݟme8TU+C& ŝI҄E4a&0|.'Ire("o,pDi gְ,3jH]Os:Dq![i$"j#g 6/ $bps;d3㙏7|{0alhFT6@7}+y3 @$;Wzv=P8Me|!ŠCX(Qmd'$or/6hР=bC{"1qu"YoY'SAӬV{&g}X!DAq4֎%By*N=Y כB<4aYG^ؠ=\E@no}y_we睔Y0WVߝ $z&$#H4N QD HN 4uFZ[,%6$1m#8e%H'IjD ez06pYĩLvXCաXTϤڼ*x#x›,#O2R-Ir/HĀ:p4(.z!"QNyF͵:Dv1r;n k#g5D:`!j|/+w2m4ںa3-|4se* :IK[+. Eq:]L h tHJZʍc@G.c$& J/F9;^W ,ch.NBqSctD8To=K$IC(2r ęa:*ergIhdjYSJYɨ݋ew!KPV05Ғ]S<*$x $RG|"HQJ,G-SFŀ!ok B2)CR&ۥ!)%'〄&dO&FKS8+5H)+0,I2b|xœ<͆s|@ghj54 4ij""p^;ӎ K7N|Р^t.4RH#‰YmQȄQ MFui  Ykk$-xԑx}hP,8KuJ;! BPnFXW $gx/SO=PiI3R*d9eg6x҆ 4(Ѣ D "Krq X2T;q&zֽkX4o5u@Ȇh=>]{a}7ܾiAF axTFOD{\IJdNAW^" a.II s3!H"G!4 '|t Sj:n~~gqɟ;@FG[}'Vk-؋IZߥYп>~&`߆N\wJSB %yxDzy@c29?" 
ޒ %(Sw}9צ,GJh(Qľ(}&{?%?\;L>dv~C˶:k;[[*,̾5͛i=ѥSDA>-}z hhm ;\+.q )*9O1n nFtkxƻYpaJ̍pnLvQnP}i;ɽ쉋=TuêѲYfb[(7}GZ#~FOi;L/0J?[]6?0#k qQܿ&FAFߵ64ߌ:4D+.g+u3޳0k7p76eY56{poBђ״>_ IQfXѠlNU?㈐ ZVQhhiB4%LpZ"me\[{*'VuoB I@ *(F ʦj#.x/ Og5HN`RH[0HwZ D佖1Ԁ'Ṙty%dS`=Q9;׻(9‡T\3(8.4c ;:%'Ʊ* CMH@QQq!,$9ǰ[)r591ObΠI "5۽ e'[Jv?aߡh:߄wh}| ON뢫pqUy\X{ygKJ󬨼AziW1ƜXkȃ2{900.ĤP"P2zE'}Ԗx93@*72f#g=2Uu[b!Ey  g^%͸#Ǩ\[i7 J3oj#6 cRJ!n ZP*Yc Bu_":J,!T`gF428**K"tZRppR4y9Ri7tBg ,d 1+ k|@N@8{NPxX)Ba2!豹c[D$#J?QK>AADz"yrQr!H= 6}l*( mFΆ>k\ zS'/-f1؎ȶAx9[A e!ahpNJ7bi1 Ƒ0=BÇw 4ywYAnyo5v@$i+jtU}AإD"UF :2qe\;I ]NA<3L/|=#Ϸ}´CFΊ;cDtĻ2J0)TH#.JhwI&8>6reۧR5;m=1c-0GiS}M`#U*Iɜb1JL )k1N INp^$MǓۦgTi߮q?8] ѣDrc۶Q0:$mͭk3~aŸؽܤ{|x7aCNǗo?^H-}zYKz-Km$KI$4<׽uGߘyaJ\])Bl5ZH Ԫu15K]=,lӅUo/M}Lg02t j hP0ٙ>:;>=yZFf|'ٴ-wMVM}J_!G{(~_N49;YrT1|8|I߃uɕZ>;*_,&lwlj*qK} 0xaɦ]3A4vV8&iGMax䏜0MihC3 d1Wk;P[p9Q>_v7tlbŏ6s2wuM7ӹb0s/8uoYW-h i¤Xl u,R5g#9ݧs=iVw" ˇ-ǫ'M5\+X+dV[11LB{+(jZ5}o|O:cU 6,:cV{M83h7޶DŽU&yIv@sv'N3SDDc~cvf[t%ߨXkpF*IǃFO]RnS Uv$4ֶ?NV3Of g+A0G #i7&|S;eP{)l'rlA^{⑴NlSIavF`bkxUbV +:p*mکv} W (,LG0eP2&j/}0T0 3 W%>r1W@-Qr dO PL+9jsS;\I&D=\BRLJ`H"1{_n$Q)EW4'J+ `*+FJjp9lXoĆK/ijp$j=F\=JF Jpc8׹xjOɽ4*%\ >JohRB0ܤ.';a%w[9te-)Qfۣ]<Q-0AEu;ri͛43&S]uOrcAPU0i{Yow[;A/.nm d]XOqH-$r< Q+[W ^Zآ+em -B"%p\yRjDi6jClyv>}lI3}SKχu@_ #9VC=zylu~mc 5P %& kϑAڼS+ie\Rp:NDguG:0!S&aJ;f2Rʃp-1Kȳ0"g{?׭rD,0h.tdښ]Zcti}׳Ceףw&4ał 1%&br>ZO5sI1\52* b|SU&.AsL>5JP]?Mg0wgOW꣮$z<$"vV<0&S\1V` D0ٻ6n-Wr]tli Mv70jG$'q/pf$˶F%ڑP!|s9@'RP;0I8ë@ u>T`fEPqiۂhMIusY`@VZԩB5m~82)x#' 9< WŅ%ٗj$:::_.~T2L$InJA7R. h޵qSBf FESըƨѸb^zr|0]չUV:dW]}LN0[#u>?b\>EB{p1j.OT[9lP5 78~|oߤ߿:w=~1&?_ vLᠫ_jMcWҏO_^Uĥ;Wl=:?@]0~6{cL V90jua#`.i?^yӔjڛ5Mۤio.9\} ! 8vD S6M/{8~㊒#&0e!{N$TaK@*Az44R c(=a.e|GÞ1#{D2pN FNzy2IbHHwMvUxlsbg /<.;{i~VS %z"葸gУ}X>L椂!9Z1NDZ`Pd~sVk/;^s%%cj=5De,cvuv2.&+%4T|> U1riL} AQ6 r}׫o͇}:M}]a]n%?.շ)e[r+7c8g0o@֚Al5l{*[r.ouvƛ~R+B2҄hJ*$E&ڞe}(߷|TO>dɟĥUA1jG.:tZY‘)e|p{Ix t-N`B<-*Caj3jX$"﵌F]؟Fllh|^ ϞP/v>Z7 ()&=)N-&x#32x5sf"Tnd&zdUaa%R )Ap.Q21?:zMo f8}&g4 RK)McRBxj5 0R9D<&gU42OP` ƫ!#a:0-"rReFlGl;7 6qf=j v%O >F428**K"tZRppR4y9Ri;PBg 4d`h5> 'H !]{A=rTiLf<&zۡ޷&0/rCr\ RXC؉7qtlK>;Bu~xvPQGNB& iaAF$H$8\nq =0|`%O$4" Nȭ\DMUqLa a {d =!"}X{))8)XwKyMtH4 G2C6qCe'{K?_ Cû)[y3RTJ|B#UǰWM5>nZј~I&d]}`&a͒vXu,ilfl=M‹OU},TqjQoG+:'/[SZSˤʋB)sXk̕ Ҍv 4>^6yϭ4_L>GrY w%xQTG#Qןr^.:5e|ru2]$,kh8pW .-5o>%׾P,S]߾5sRⴭ_ uoU4mi;}j`־ ΤHi82%MH;_@SC@O;Wb,JْJ+ӗwz7wA7qZG_LN. 
W{d4 4ypK Ú[kִ#79+BZm>R*r=:u@3TH@LXڑK LvZx>3/!2e8g#2G>.S+SQWd TZ,`xF< 'Vx95V_ڈi%^#<i*PAY_o~{dE 䛛ՓL CeP{J9Dc 1'I9t6ngƽ-~úeu`1CYÛ韧YmқھMcDvuK9\ii6"gl`,Y0R[zuf+u{_9@W,5VJZhoZiel<0g,DIhTN[!Lsއlj<,#msp&}c3 92o Fq9!BZLTBUrFÜ [, /,eoĢt`EOc֪ ~7_G LsRdvgdt9v\0r: z)2@JF/tJÃ&^E>ZzQwf.&ۄΧ$FB;ˮ>nEq05wo[2^}n>Tgsᷙd_v͋ZCDjVSDh` c2j̧E▱?'8)np0%sH[ hʠzN<8Uz*k2Ah.PHbh՞k:FY^1p `$yZ%R&)>ZW)F"fOmνEI7g>Zz\ __?~o ퟳCܛ[o6Ri%X%b]$5gIFA$ ^4tղ+q1,NCׂkD-8S]%W,]r-m[XK~4.fp܅c9^/[p?5^aJs}i/q[Ʊo.7HldV8v2bF \0Ip٧$=gcx5t;y:g]&A{\Nq1%dn&L$D^ۇmlck1U0V',p_'&)#=vE"t[7)5Y|^GIL@8Xeeslu[f'0rkUz\ \gC vi$()j-oh m˯`Kf!yߠx`,MM^__@>031^=^Ej'c7?<m}ڵ 4OV;}>-96âCYШ"g0`eHI@gjwl@huB5D&ل*80}A؜7F48T90n Rg`4 uv|SWmE7޲v%"3m+k6$zddv #$f^:mK mKlCF@n?2i*C: mwOsu]BLJbʤh.e͐yAe#Y+:*$κuAg3z5%zhx$uJ$f b\gR3"7ZDtZBbJ3{crDM`8DVV<08k ΎWaVwMXe@C픐Qqr|\琳1F X /4dֲDOG=uyt @m'QG V8ϠZI0:/z;Np0ṷ0 Y#j!htdaYX4 P#7D`1 qL@X{S/'Ddbh:6v<&.4P;R6ist^fr>̩[)D]Pm-zD8w%\_k]Y2o$:Q,4D@+nS&vʕ#l` BrbE}Cãci0:Z!ߢvR&ևG$Udsߒ\.]%ebCBDM&dY*peg,{> S>tpUX7~u97@TQ3ήq\8b4Lko8qH _ )|(iZ}(» ԑ KCJPsPWҢ(elYR@`1~+P'gs# oi: Ɂq*O|O|bhυ̜^)[H߁`fJ:+]dIsPZ}˶Ѳ`e+UWr +UJ 7؈ٷ3Zy%0!ə}s͢FHAD% UV8PHcUTwLt.hFM(ecMcbkU|-U+tEh9szzte:0v*pt Z-Ν J{zte 5+kSQW]\t -Ha;DWآ ] :c Zn$!̎Mo3<9]s{b.~hՉ\l/ςtezznsT}W=j?ڞ;]+*!"(CW]ˮUA7HWaTg;][+UZ]ߕbNW7IWp*M/(~6 ;\?' 2_>AgCLJm:İX Jta Zs_$fU|\w~-h-;]JTOWo,gFBqqZ{sC=]EB.+_@EW.UAԹUAiXOWMo3 ;1`NLW5'^ e':ƊYЕ݃lOWmzQ1!* ](BWVsstUP ҕ,t"UA 骠T7HW CtEw,p_AtU*~tUP盛+)V!"CW^v]UA-ҕ0ӕ coN/!zӻss^=6Ğ{xz}΄a٧rw61離yc7;HfV涟?;>ISVD0]JC2.-[HfaFR$.f(sԈEhgJ&X \;j*e & $\>NlqpxMSGwoq 4SX[IɨݒuM%i2,">"tM?ԣ>,Mfb#._(ֽo5i~5*ɯի޿ac24^lD^Yx;Ѥ羨N۴*idj|Y'SR7E2샭A֗ _/q3Փbdp~m>.~Lz@b+ܹ*I 4O]-xbr^Mإ/ Ʌh,AEHT:S!B?v%ɱ)3Ha3kA|Xiu7oA$ViGAAJs& -8iő4:xw-wlDτRyVJme:J  b"92WbKS VwfC'3pV }7'o,xg,Yl99vQoz4_;D_ˋl d.=KioFLhd4\Qcu#Ij9EAvoYmgm^겧`,}PTL<&K%3Zٚ_)WqPwq5*/\Ŕҕ , })xm軿 M?k ? G_oQ?[!%hďDG#)zH<:eֈ 1aFƾI gB0TBzFDhIiUKIӫ,rL)zG.e"!8T^/t˫پZf*vGLjgjʓ͛o%X[ZdjkVCiAWݸ"6g]rd' \4”uKjzsTɖLi6/_ ڬ3ul7Uo|~g)7h!}wd!m<-Ϗ^=/q~}׽wm捫 [vml>{cCHd^_G k[R=ƫw"R ]t^N(7Hƾd#YYg /1 U:jix<@{ݺ[jKՖa˒"OU+3sC1[ WT!#d4+R?9Qrԟ]|}/]ma|ڧ _'&Ch\RrbІvF-J@6eSL`qcIzzMs[}-hPt*kGgDtqvanJyWT;bk8TyYXGe ӎa fV]jDQ& ASB*ՆV +0]ô&IPM!+V-V^_Uh)LB,%\RanlVzhώN.ٍ܎޼|Zpķ1L6@oč[o| bFBxEwb5YY i&§a Co) :L<UfW >Jg0tsah㳪l&h8jP9 jJdS RJθuH'.U]Z4Mcbhsk&hQj3Ɩ'5@Tu^i<`ݤ9㳻Zh3~2P8{'>,3O'ke!}O5P}^xP>bP3zhX[+z$-hxj}X s9Tp; :2@ CA!3Eϙr_gkǢŅ\jjv9f9DQlMTgkIΣ2XF`НhMG}_U(oN {vI7|㣲4!in~er\)^=h^\D.ZQFG1 ,*+Ĭ7b`an;[y5<پ-ט6U1cP Zot;M198"Ce}ԄBv02`Zy]:^6!/0BH;Y7r\ lɨ1\" UNV*,[K1Zju\ T/+EZBQo_I,,/߰1I e3hN\'֡ l,2G j.oϟ0O<6U V[iGmh- @Q$S4Mtޠ.@4*.cg'OTue,kt~vFωnyҵ-b# YC% -*v:Gu)dTuL^up+QJ2㨔1)Q&&bͬr EIy--(S-;6ҍ%x-zrzM1k_|}M>Ѳ|!Q-˥-.M:"ؘXʢ:PŢbVNcm7֪ݩ}-_EA/`εgd{jz䢽@$s5Nk Rc5Sb1YP[ȈƗDNEΖ&6ݥLqxũ([PS k\E>VCSj'!+0P *0 !}(.cI1E PhS Bœ:FΧ 2 . زh Q#V40 R+Bnb0\L(,!,bytvlQZhvG{K .&V! 
وZF8yjlS^M֏ kG; b˦m'ǧgZݍAvc34}1B9D|1N͉ΧsiW<@zݐ yt} ΦO>3.wry9-_|g}U%sT** ()*g9,/!sz\1~n+>=-f`kt.'9ok80zM9Pg[zG5L*/Ѧ$V2'Dzl>k-b|+,VijƖR^$;Mq&q*+/)WCºG!9 XHb,uQ!2Dh\KlOyda <b6 Y&nA$ V_РQ)h =W@J`u\],P]D5̑(1)  OTyjO[uO7g3+?<.s{E+dGMtF+WXs-yDlr^V`rʕOY4,s $of1}r=U?Z .#1&k![ @sNV xłLpq&=*Jm[c;¦B\ 2-ɛwx~WʽZ'bɜ/=u?IZ*UO[jo>WQڃUe!/njm Ns+e wPj9ƿܚݓ(?.u[]~yË_  ^s~lzpZ[k{Q$6ly4Zh6dƑ]?Ѧaa8MfQx~QmN~<}\ӣ}!wGp4ꦹ:\-/e l}||Zڣy՞R?c7:ቿ6.OP?w߽ߵ?~w~;û7ݏ+dθưi^ZW--iRz˙!p۷st4_fn VgӜ&tҌir 0~Y>"@tShC5w94Ю/|q9 N q-UH 8 /]}J(nH;:Td(Wn(ZLl0fA͗j'_(y˞Db@=ey p+s!T>{VEgϤm7q5JE-NNuc;#z-i s^&Ir,,/W|6FQw/Xa@<Ah*˜aS"=)-۬'eO=&Gۡ)Gܣ}z@5V9% A&FA%%{廒c$l(ڬC>5jr c(PUݗ!OVJ;Rg2!'gr_ ˎ@ {O"/|Fկ?Ϗ7 F:li~K[Ц w> ySQ~NoWu'hW7n#zTp!jO̧`:_ ׹PV 8$i0ʄ!%&hAqyDY_g_@|-&`"\Ex1qa"m@ISk]u̔L%1tJJKveg']7rt>߫w؅`2G{7]+Q<$§SCbq.#[!թ.uTu&RN$eP}GtQM!sFl){tnlq4icS|ڎIb9 <~<H;osپ|jErbNjK^5ן [4Fz1 :dSehŘ^nD@µ̪ftW3dTQiQ=\Ƭ)1ZMLN9LJY(ޚ9k~Xaq8㑺.Du+2p+9Frznlv~6]r Ja>zB &s r `yD *V h25 24c/n56LvF,Hm!Vm" (>> 9k|<& ^X:km5jڟjM%BR5h1T-*_GK0B%D!{׶ɍd৙-Ƞc†g,ef$e%A%i3aVn%V륥J%#Ɉ0d˴[7!If P;h5kJUM5,ıe/*Ҩ(!MdA R;0;<ĈhGĭuٰdweB1zli 1m-To87˕%Ӵ 1r.8hJ΂lxigr®uE 6dvx2sY늡|\/is2/y,.ډqqq<^ FŒ_rه綌7ЎaҸ_ NC713@Xrog͐#ﵵ׋ !f5Q]~|OՏ6F'~<8_[΋2/? %8֒Z?ޔx$u_;= 9Ðe?W;|;`} ?:.*(_הIvFPͩIE7ײexs<$TM|.__?m۾%ϋXidӅ,8FJw^##@ J-)xZ cU%fi՚'n&*S䲝0="lڍ yh"R$j&ߢQ$Z4Bectpz)ŅTѥ/wl%kidwYi :%a2svQ.%!~A)÷}%wWlMq|:yaĤ3=|?jƾVC<"+l| B%dU$I=L<ǧgzFg<dz5bu FժU#ZU %I=L<'9~N=#NOZxɥi^YUĿ%\9hZVX@Sbɢd~֛b$f?  2AhjeȻ ą`D|i [X~g9{@/*{Wmdof(]#;2{nUJHe#`1iM]Fn;WGu׿~wp__:73BԺxvtvuʏ}5K_l؍`oo#=25:qXs2/êHx|@v2l8Ԧt-7'gXqԄ9tk{!mn=[Ł+n{xouM-}ewTtח.\g42ꢗ} :+|3Uwl)Ntz}_t~rW Y?sGZ e˳M8CSޭy؆ N<ܹqk3iIMlN7w߭na "p?5? Ok/h:7/ڿ2۩ bW(bC4'Qڸ,ymj~e _D&x;ӪEHEL9jQ3(V3%7MV3騚B?\+߃S@{/\|CuwZ^^&}K('WGzRX+]bOFJ; 9UEU++ŭ/瘘G2 H Cb@rӶVg|1H*+v'cUIy^9 %}7/qRE=Q3|RIPtF|+%ST",!qKZT%+uxȬYG̿ IC>N]5SL7t]׵?aEa APWZu>) /i ]G]} as#7IZfKOj!.|/Z K=Ά}ÜK ſ1J~Za'|9Șk߽oKojkWU'_v˧3|wu8 ҿ:}h/IoP4ͷӣ |?fJC=zMM]V| 8{{|~zUxz|\>̿[CA.%mC!jmC^?HC&&ZPƦ7_LB?ӘkHNί8f*kÄT 4l*qh[H.x<'Q~J=@`Ž^W*Gts=;9LRo>Ix&^49%qsvQ|OBR>7GM=DUssޔ$@s"HuNFi@O0aW#SDM QkNҩZF 1oDM&*Y}W!Ҕ*qSU eZTC1hs %Չ|wx^IXNX Ք*h,{RD bAՁw@]VjH^#RnS6*ˌohZE;f,)4օܢ@i v+fذNWp A^k!N8`2.*&uuZ,J>һ1@bjiı$5iF/yX@Y'E)sXE}i}s?嬘6 Xx0ҐK ʥP &RKFdX9LilC?`+4*V w5TXv ,s&2m *\EW:4`$D5n,V r ֐llڃb d*3R`Hq 92h_\!I{֩RH6Yo৮oA]TQ"kxIYH1Jη fyrLu_]j-8PLil-@G@Idu{PH&! {j>Vܸg8M'2Йn }rnme/f**"1tjpBh80wad<|~]v&>_F ^f}D="?m2. A mG"  dz@X8piTpҧMg%rKdhR}#'qc)@|I A-9J$@d"kQl\p⸱.`Ł>S!^8 rHݗMeee[+xam@CGی 2YЩ ֏oT>65Ww3͠\,"(Nk".ڽ;ܮ6ٔ"w<)U% EsK,S܏WP X;KR4 ЋIuD! =t+ R{eۨA׀R")e`[{'An%hvܱ-:t K*BJ3uƑ&匔#3z:p2$@>)Y>nø63)H̔'J@`? fhwE< W[0<) cκ 3A݇Mv)ANH, ܊'`n1VI{Y;I*@5:k4LR'޺M74WoEU^PQcA-CTVP¢l԰ @B},[ݳ+0LCTj`4%Ǎ g/vhY.Uy|xM\K]$b` MudlhfѓRcP%mII&Y:*5ZGm$󤑲Z h}9t64)޵q,ٿB l7#! 
y`~oЯS$CREV%GȖJ;UէzTbjh 3|n`xS:3% 4dsk(aKh0w`jYT"'p' `!w`X76IW>_ ++6U.0 ׳|@>o@#dFhâHY:zVcVXZ#eˍLGjRCr'Ǒe<(`ҧd:bUI ܵ3k$rA:yG-/7/u w0(8k 6@2j`93#Z`6WX4 a%xkrV;M:rIjKG@] v,Y hR<I"~!{S)HK y]0F X4[ a^ue7q2YlX1,+PuYAGBÁh8lRʈXv{S)UAL$yѶR*+a.®jf.oSw}Et[3.e%Ӑ_nrY*m0!~~[IG)qt+GAFLPzB+idW@=H+8+>C•WD"pE+"\WD"pE+"\WD"pE+"\WD"pE+"\WD"pE+"\WD"pE+"\݋p%)y<+'Ci|+)>K•9"\WD"pE+"\WD"pE+"\WD"pE+"\WD"pE+"\WD"pE+"\WD"pE%\ᑕyJ+pJ?kԓ!\i{28/F/_bz*p̫+@d  xC@,@x5~QPm.tS͇Mpxloٗ$C'Ppi+y gdlDΏ9r}9Q\Zy~y5ѡ $hg)XբJI+o ~ِWax3eivJZ=MS?S]Ct Wu`sHp-n*'RQ:wRu֥Zvuڽ{DV{[ء/7nNby9Xo'< 1⬞ &=`_ЁK 0Pe vך `U/UFJ Q9^,1& ?.0mHLN3xƵ:9&|D/0JI1d}ٲej¨[~p`+#dcym:+\}ؓ;Ǚ;yW0%̾1ޥ]gv+uWe&a4g3b+ۗ?٘g<t,\phЍzFmpz˄F0'ζB5o ЍͳEXfɢJ$ݾkn}o78= t)I !=p~= 3<a[_?](^})W,=|Uui3ֻ*ǹZv-99.5[ѳوw{>=3?= Dk\p `ry;{>;^q)ϭt~vM8x\fsN[CURX}@{Wk _?^}C!@(0[a]yv~1Z\lϮn^g%wA}4ucr׬RBh-uNB)[c>{KGG&IX%p|A 8*VgGCFV{5Dh '!+&2*jF5:~NwGVW SuS FlX!]>}=Xy]wǐkվ dΙUӻOukމWܥ=1C)50rszh#kaz+Oa<=U([ɸ<| L:x=h##-M:ޟj0O"aտd|]y\Wt ?-&W6iϴvWuӨ{2[ȄVӬW&rheﳲ]vrvDYNi9P\-rׂw).;A:?' m,_4E!&pc9VWo8^zHtY+uNΒPR, A2*BM_!vHo 1yENUɅۍueNo(Sׯ{OE+/b_InKoWCfr4WJ-1MqrOW+m#h?8.[gv0чnrЪۆ8+փh>a+MOKr@/}śF3j%mO(Z59?u;1/$ve(0U?۲-o}A{:Y}|.;Ǐ7}fal ᦸn,ԆyM<ױ@&{RaI]$Q$0o)]QFۛ客gdg:iAS57w$aֶQc>`%LV,,b ekӫQG;Mv՟EMQaK]L'p<ʚ{akSNumTI@/d Y34KGObDxU z%lti"!izD D& jb_Pi[{\ 7 sNoJpsrwӕȔux#얁Z8cXbJ MGJ˙G/c^{.|qX=tGl[ \Cݧ|:*ˑ|ۻ>l^ c%)I;ML,EJ{S!Dv1+e =,כe|W_#WP9LYusI@Q2c ƪ4~x,ћkg!7Fq8 t]Ƴ%]MF/-?}mOL Kmϻ%8ol2jc#LL}i'!UԘT嘀MpӗPQ9,<;'tԸ5uc.CJg?=Ě#gARXe2/I,IHIx|- Dn} mGhۛ8-xꊇ~4GkM= H|TaRM{I^'5Ɛj6nt~ϿW& _ku׭}7cQюQ-\Ժ;AO1=^"{!-jSW ^d0{=,r2dW*PmUT]-׶&I#xJQ^dc(85f$/.|j3}D=9 ?۲1$,3sNg˥7 Ǻ8+흧\.4yq vx7RX,ɲ8h(S։,dY'],9R| 乮 x %BiGx-Ho81©3Xd틢[=1EQWPd"KeGq~l,YrqANJ+Ur:jW(LHȭvY`31cx;7{~d4*3Vm웈0ƯOi nv$11suV5KZSTy.6 *|e1' GqCͦ@dǁ}q7&^=0}.VAO |<}B1Nxqx!EY{mMr$4F7928k# p&eF({#<c8cs|zہ7yn͠>2/e62'xֆ+\)V5`YDa 3kۼ "*)hxql}+Lk[L.`1/<S#3CO{{]`NEVǖݖ={}WɲMJr$V|)nxX<5/$>:W,D6>#A8^3i/0麰>$oe 7L/w=-̇3jvxbi)#%yK,N7w;=ލn|NEq?k:ӣt.e!9b_no5aWSois Lw+k>o)CwannWz47?WJ?\;Lfݍt! K h+ЂhK=,STevw^c|31i_E5ՆGN[ʊq_;›1xkꭙb,W7m Gflp}dN&c1$p>3tX_`(ѓ轱.yKtϢˋjtT+PfxL4ي \:h%$-s7S=Оӯ{}Y\hҫeʧG_ƓLL Ne/ RvVZg#M! A"0HpI9e HlfxEX >'CR)@;8 =0`0٦wEdI^$:m^lib.>_p&6Rmm!T)ZTS.dF$B_йE%yf9 X,`  Yw@7M&4,}Z,|l Y=*HP5RXV^{VX*9abE\-}1Lh#R73F*j^ 88]3z`* Eɲ_Wtlw}- 4'ذ4fYo n{fp̽cΡ܍cnӕ8kdeNcRWvAXYbq;>'pB#L>+>aM! D1j f+T~cqKm:x>]5F7G9-xG3fAA#8bXFC8JL dLJuM~Xss+οXF!ڶ" D٬3PH;TN+׆<:)x!d {ѹǮǙ<I+qbJ"؄@ߢEz Rt 2t($[B89W C:T@*Z ,Z2'v!=3P )b=bId]=$),f~2@`0..e̖/ڸiyt矓,= {X+LRG8\ ʩ+Fbtg`tK>ϬIF,sOǩbz]}?g~j^Ԣ{hoGlE.O阕FLykl TPRHL: tOA>'Zf*l(@u-[M$\7Jղ'dL*EyrSTIv9;= y)tiUzFgW$Yfy}&z٫_uZ*'jmU\XH#C;Ͽ^^q[H>w zƘGսT=oV=A ם9 Ǔr<><-]qM~|r(L{'h!ֵdږuͨ͠]̪+Q3N=ns|24U%6V׍j:yFjÆsUWڱ2W@NgwXtEl<=?w߽?}OwoO?|˻x.2pn\ZWƘJяőD^kuʸuM(-2gf͘ꂦyk'=1~Ztus:pxMj4hPoӴm9oѮ6-ok[K'W!i@ wήeݙhQv5 _D*2&YȠsf\] :dT:] +"Ʒ vҧv8,Kׯq6 S ]tE1{(.&UL9kМ$i8irW5dbBk=tt V ;/&\V;_D·Tz:ZHl*Hwf, >/F~+Ho]rz,QOcviղ!$tLi ͡OOV`zSMD(q-T{0C˨/|M-!2JpCGy2ruI Rht<ɽ׽ٻd }ϭY`<OB^m dK6{ok=mojԖHKi͑1?~5х4qO^v|y8LD(b}Ѕ\B.J2u7=C NQ%3j: L1*#0JRSjgIr9[Sr(})j 1QKTE]Pd<FEjl|ި}Ϟ@/}r|NrlY'(J AZҥD(L":kT MjfU &W@ 5?Y?I:"q sZhRtN 5^ͺB͇U(wϮLN>w~Uwjmm2y琚S[5Ӆh!.s!m>[./j P1WPLvւ rʀD\*ځؖlԢ+)RɄ9*\NVZR +֚Yw֌J3]L2 AՅ ]ᕝ=ie1^Mx~gp||x<ˣklBJs.dGI QgoMD 1R)C*FB< IUZ@MV+l Ei 5v<${`[ISl堵TJ /,P5 SV8L24(@#49Ӟ.PD#5[Ȍ f+ ;5) *+$1[/"TLFuv!4ևͺ>{U{c[S5jՠ1N4*ݝP.)gjl*$$BNIe˛X3:.S4mJ4h`a٪LNRLLPgt-i'5Hcug ̯:^.2bQ8ב+i} c#[5ٸ1.9pθѡ0:EbJ&_tKF0hF2@$oKP>I7}FeLҐ'ٳ|L9"t:>Uw@m31.P^-Ǜw*gvz* Uhy^Eg0׳+ s ~Ɨ_4]Κq/I4H;H>GNaѢIT杢bLg +r".6.k8P=+g -͞&{(7,B}5]Fq*wN,8 Qj5"V;e{Pxx 0RaTXaz0Rrqd%V@r RhqGB y>}C)<1 9s*ꂩ^TMDŽu1,% +HTȝ "tJT@A!T nV! 
ZĜSŔT{cŇR XhNn2Glp?}#=H.Wj :Ȁ; wA6f"]jTA)o3`03||WM0?m2}2NuGbv}~<= ܲ}s;˱͟hb(g *7 --4eQeZ뻚XoOd37Q5ub̞V zvv.w9z.+eW}6KM7JD޹93C%n7F:m{|QyC 9hdPazxV3 lNLRnB{4y+>_Vk-{qMƍTe$;"1`HuiR&XVd*ǭ(8hXQŠGcYm (=#`R3ˋnj`y{pTO?&:#l~]nv{Cר>Ƨw-K%QZ+'b\:p%#ҋ8z.C,Nm_v]9C"e`+j[#Tɞ{6/"j|RCMTChOGuxvma:k5U`C:Uj]GWmnVzgvhWytf'ݜ0˧ WZc[ϭIr ۬ 8wW[زRkn-o| ;r==5͞/܅lo&{9|C$MϪ^u6l;t_avU׌u 7jr ?}JNu=pt'ּxc턑Wz@!;>-)4קey%K՟VkpZz4>As9}]wȀ9=0"ALjM<(ÁDVHu mm"fURowE+c)-z"KUML7E`s*E `<$2ڜa"mr1^gա:^.ITdYh 'FkFlJ^ΛvoP;F#J^%]֜8jPL*K9g,z&7|.'I`A84˃$ o@&Yfka@7:Eῴ` yt@m.G3@0xC>PB2hą103m8'Dt N{gp 9X&Y2>i&""AIe wsݭaB,[8]}C5UrkgUoQ bV U/sU=N|e÷Uh8*JPϿ~kջ°j.bU4<'O;yvɯ'y$''ɻg&_\7EhxqoS=O58Q=^@ܱm^O^iӶUû鱒ݏ l#g];ڑՀhbf2`;W׫&ٳB|`|?A L 'M5L#hTͧef d{ /WCi1BpPE~eAK._m/M;Cv!yWc^<ì%S{taP*)(2$qB="b6Mt죽<>˄!G a?JAљB-ٹ?M>$a|7"yi96}ӧxX p~׳þyӉKe}vz8EEIMeh@!Ǐ|=?SqR 2|FMρ8Oa6+A.Y5܏]Ef[J3JpyY roqxXͻI %=x%F' aqNDCq3L?OODOD:"8(pgՎse@bE|`H\oXuS-sJb9 QR~t! }p \HMȑl(G 8ŏ(?g"t e{FZ#Bi:YO9TN~D\ D304ɒTYgF:QW-aԹVCe V2C!R%5 sp !ZFx "A5hRxxl"Bѝ_'7|05k8Z8P' WӂC rme;&I qq Eh#ASODP0ydou7#ι{/ ^p!$4ԍ*&IPqJ`.hx$NGřMhIä! 9R 9",3LGLxQ<,N=Yog-_ج%u8zA 8T-NL>% !ͭқHGDx" _ Ou9gT:$y,SFʀ!mksb^fC#̢B#zZȟ;iW?zO+ȥjKKTb&p A@fƜ< ^GGO+m{7wtL5]CrV#{@"ތiÙ"RQh;/43)XIAx.YHVZ/|="8qsrP7ى#=\e MFf*[ݤ K6GmȜoѳ|CM\! {s06pgߢɊۋEI6WOxwѸ-ȡ'l`i87A3I^6^La'#7SքTsIMQڒ°0<$D: DFPM8upBh[a,.!ǽIP0 Zv)"9=.K$뻤G{+)cGT1ӄHw7|/YmqjIfT>qn@bLH~CR$IQE"QA^K?{Gk]oT mT. #hJ? WHR$,!X <3/ߌ)dC 0I瑻S#S$s??=^,:q{`*q ] ؔ0_2+Y;?'qk;$,Ci2Y(Ơ"=xt1a))1 ,\dxIӹQlPFc=j{{=ڽu?&cOsM,I Sͥ %DT|Z/%a'$#`&\,SR*9H> †$\t*|L%QU@ttt(!0f/cD9R9`SY@u/=La1/pDBG($2mbN0)Ȓ֊4V) R@/,!,H J3u>/LJb"*YQ0E*v9im{WT Md9j n|!Q|tbd sd+׮.D<ȝ.*meL΀g3]ioa8 Z6;-hwZ%fwhnԀ%!\yo;ն}3 kV ʖ`-Uv^iΖ&@E2]swǹ,q{oX/;-o< ,JJׅJ|b\Id'XxowZ!J0HP.fQTdpΘKZ^ ^9 @N0feє`x#Ē"Iݣ~px0*[mwi(WgZ*˵ix;8߷b 8J,tJo/.3\نo OfL򇷒ϻ[=y&t2$XYp%m.fkks1/-Aʤ(eۢK`֥->w.ɍ2͙+閷QmfC;K^q&wEڕ1-JŋylT̟>xy>;^*9[UgN8GjnW $%9GrrGiXWK+oN8byCip!:Y +Icϯ)[h 2mIk1y4ۈ"bod̾\CDq\ʹAԭ&|NN}oovC.GiP|bb [oY?&9{u`\"ԠjsSIJV {4̡,73u䖈R.vR\\u,8 .g>%/54pƃ+0`p4- won]~n~O>;M:eJ^[\R%C5]kl^̫nxc4:ኾ]6o[|lxmϴi+|D5|506x'rG&rhW AF R3B=:I}h9px{~8S &Ix]rJ B39s.`h.扮U5ٔB)aT\FJ%.2C6s/?S8V6UΎ獐}@}KӉuk:)E2$|<.M2A Jc="Jڄ2(DܐZϚOIE+&uNq5FkʂZ;;d::+ hi:+YB{pU['{Yد[s<;wx}݇EiMΞӷ3e53tW';A[R f"ƠCBf1RyQf D ۑWEFlF12m䂠'Y> ]*؄8*dX;ۑW# %XXx٣2ya`}~0M>21%D뭵!Y@Θ7R2b` 3uzH'i(^@Q ) lJchim&ݎ9Iy 'XW;PlG8㊊y0 ye=j vsD{9gg'HQՒs+ Ԩ ()TbPJp[P:u#gml4W!3䊴h`b`D Tn1kd@JuvDjَR_.( 1`<[k}싈2"{D'cݝU@dBЦ/\%gǨ2DJ`7H7NZI"JJ3#8L2H`p8ѫf!\d묶JEYe=.nZ#[l nI+!bF>+O#G{W˖GCaձ/xR3\_tN=r^ 8nM۝z~S^qr1nVҟXQ b470NjD=]rR~<쭓=rE{(%{2KTI8d3,3ZEY9rKD(ky{`D^XMh@DHꬣd/XtRɜCe 98ın63nUM!ry_9Gk$R]6rDe`֝0 !})R)1y8->U;jw66 j|1EG~>0%)/w5XBkHB$4gg`EU hxlv=tBO<_Xge(dZ :  Zq PİG*SzQYG7"-x^;ŭ8-dEj=$R3^y1Ip@41'z&耄ԢCfw5C!C:u'RT[eJ|^]=hxf_8*SwF:gxJJs";00iڸV=]ʷkVNߗ_{hGio?}Z/FڌR3i# &w$JgI~/ALpp]m q ]0njgfM;FD,R%Wμmf绘7}1gQo&yQ~apxPP|ݚu9-Ts: XErN8z~giz"}>wl{^8Z鷝]/$p1}Gr9ppmWe0p>Z_*qhi|Qt2§o|W^ ŵ0F*NVޡq>[* ImNj}7pɰMY 87 HY8M*L(;s[@Sq77v<$Kj+-dvOrӭmw].Nv5'ӿRC/R?-s[T#8.~or$k"Zl)QB\ݯ6h~^;OyÍtNφ 4Xc/ >pW:&hQjr 2* ƲLuz>mhh/lکkZYQK'jX3zk;֚-w< sPdU:#que3JQwI4eςBAXp}Cvf =?fp8-$ˤGٻ6,W}L^63/zȢ Jy }O5)R%7b|a֭8XڥlL9]nh9Z)Nu/V!2TH/h}^Lz7oa:%w {rҋ@lfŻwk S_5/φ=}uث!3B7ha-oÕ? 
X֏˳_dH]է~Ӭy[+o#/op9Q',w/NV7}ݷuӿ8^ߓ90+kS۝&^_g]g˟Pf0oG?ܘz|}m؈^k!X_v uﲠ-9]-:R~}[n$gus+Ӌ&^z;*pVmCW8 mH1/*a1/]qgU,o7nAAx\Լ]3;)Ia0sFvBjRQo>Gx,*IY-R[eM>JeԄwnG_7DAC=lgb0 \b瀯_lcgI\KѪeÃxK[rH!?-$MPd1LףKrAHE IPƬoSFS**u-I?l A)nwﷆw-<)Z zr^~5 Rk䒔!pTT3&KbDhi{vcM( 1M)+djޔ)C)Fa]q_{ DD0S puj,C?W"nQ6Ls9IiD*6LaPa2!cާ\1*ciͻCsŘӯB,={.PFLY?Ƴqw>fiV8<)8*a}J?W'erZ&ȞUY:QS#J̱6gD$GY0ۢEE2{g`}^G% Mwc5H؉2VM`r9Ư#|2_GLH"Ylg.)6eT57%E$DHȔ`OZjKU&8l* ~"߭"zҾuړ(.zCR [AioeM5*fr ]?PVc 0gM(tGhU=.%ɆPa*I%CLxe@njl T)%[0݊,9+CۆK8FQMH"Z PbdKw%nQd0 '-s#'&t9D `9j0ic|斒l2" + ݻZ(Z0*aUcsQwS'-Mnh+U!R%bj$ ʰTlPh5$ÔƢXy/(V_𯜀2E@Lf2ji vmd a&d!f@57._s*P+1h aƣ) % D&PI44g97KdY&ysS 5%PAGE΢I nB*?% QvdrI!0 y*ED1nGY i;XCgҡI  m'+Tpo*>Dr4GSf4/Pp4gI8(E{eoh_uT()xf`W6;'h2Z#1IQ@IFQ˚':FSNU)QNKG!ABi5]V+ SN |u=fVcᛯyk&Lo A>뗃uk^LKQD+f1De ՉܡIJ၁W&eQa[gv/ԍZ.4mKF#- 9/mZ$L>^#x(u@Ku@82(}vG* ]45 Z.Uc-EOpE A-)JI VWed䓢Z0Ӆqca}J\tp_R˨չ;7gY`GWq.cܤHp+QϳW5 VwFTRàM fBNA}t*s蓹hOYsO/xzf!B~t)]LAjx1) 7D. .B @13nR"2;Ex(%'&b%scǂ@AK^/B>J3:hLMD0.f!hk 8.xEȒ,B\5vƍEQIL$E}4" <@{pw FD֨Ww 0$1!d?}X%M t")5Z'tXWH',Y;5V,e36RIӿz+t52ߤt (j/Vp7] !Sj1hm7a{|MZ,OLHQԍ:ctLGs acJ`a]˿4ENmQ ƬwZ QeѺ:I3)#<>>&|IaFhQ"}_wCR^` 6%T2pyM$I].j:S ,1(L $CB6(z DzE@z;5`}vNM6T?f`+BJ1$'WW(#7h.z? `\SL :-FjJ xT'#!SsaƻuSj =JmDR1uTl:#sqֿ4Xv=P(-g/8.d$0r$j?U'kPuN{jُst ʷҔ/u|f1(TBmEUbmQqc &X)*:LZ8`m zQiDF;Lr%#p{N!Xg7vҘ%QRaԇn" sI8Y 4iTo#I!:.P,=uSJ.Z2輩GFQ֡"t}Bߑ`j?mz\^ܴ\_jE 5zyR͑6w['#cw'8qqsBWb>yu {1?N g> %P|J mWҀp@}H~sWJ-Y %*dX J V@b%+X J V@b%+X J V@b%+X J V@b%+X J V@b%+X J V@r@@:%AQZzJFg}J "X J V@b%+X J V@b%+X J V@b%+X J V@b%+X J V@b%+X J V@r@hJQ\F tJ T@b%+X J V@b%+X J V@b%+X J V@b%+X J V@b%+X J V@b%+X *HQus9%EyJng}J m V@b%+X J V@b%+X J V@b%+X J V@b%+X J V@b%+X J V@b%+%Ч[냺Waav'}i^\P wgyyqr~\)H }kۧ II_:]9.N8)/g ,gqvR9ڥeHE|YիkC?Wˋ\/n9h~~vz0o>vr;.n#pZȭaIy~\_n)fTO2ݴOB){s S7U՜( QJr{V/uہDɜIi#0wO?|z>?)箪8sD6\ ̐௾|Rl+O 箖rK̽ID-ߗi7\)ېK{.41Osڼo[ k=ifyfGdvMԷͫ6,t,tr몿9j¤, Mϯ=t"8}Ai>q+aܳ*&N1>DIW.̓@6rY>fY*,gUi1_?;Gֳstxsu}gæa9iZ%C Vg 6w0A9ӃiY"yPY D >p"{}&]9^~?S?'i<[ŋ)W0u&z1t~f@hyts$m2K|v\+bmD?/_ӿ^ ȶ0}1hW>v‰,P6mR뷳iJQF)6J*զ`;,οꇘʧ͑g~ryupMW}tk aϋoֱTOrqra>9 N?-@gW>AɏNCye+sBW/>g'Tv.[+?#9!ڰ[C"  qb/~J\)~3CrKHjڙz|U]]=VH*gگכر~4z6'溿e1v ~F0ɵg2+KJ;?}m~.ߗ3GrWj^I]b=h\L۱46 1o|7nv6_ 㢓CpYE@̚G|Ǽ4VQEpeV63gj_j9߹նLe$*C՗·j3݌¨^ y YSȏ'qTJ+e R>SG1TEWKY6JP>FGD:n8DLd:%"xby<&R0cb)qqF{+hjMZyIMk }SAzuQ LA;[,}*'zpk0O tG/\PTKxoj,|,4'b>XLdžXAxYxXH#)a|q/nPӻgu1FU yIf䲚84e7iubA:_OlH ` `b0;DuiAX]%Be'Q~h@nTwE;-M&ddf3?O(v]=:\|5l0/س,mDn%$L3uhl뵽{ [ǧU>Ӏ5skԴ=LgYtfsWuRmͺCh%junMg=/٣N07=$><m7ӯg &qQQP.֜t{K[hVl~P??~kyZ} D B!W5VB-̃v7MheHoˋ;A[[ؽAV;Xa7Vt'6Z>W4p'GLt>St_ixT88sꏟ+㭙QQk"o˩~,gp2*D~T?Us7>F;ǸD|&P)YH:*<8`u sfS>;3/3$_8;8=ԯ˲ О]#0t%Ԛ d#oѽ)N8*iI4H3qD3컜3Nu9dƞ]8X)Q `@0X3Oa3DPxA=Fk`ZDi@dZm.<v<d.uoon\gȈQ9u4 bxHx+zo|OX (c\9"*F>eUG_=ydC5lQEh>ecH09d LD5=v#m^f'N7pFhy/OHٗpn۔l m4gtt$t$0χV>ګn8Ipi,Eːc"I0U[4"@c֙c3H#Mp Ztz밢ZȒ~$7B^Q<1C* rM 1A)FQ)@#Mi7rv#Љ(ʻ 9cm6,l|߾4Eۄ78k&3T."Kr{`Tʜ Nq>GFтKt^EoġiGS; پdG, ^YA+^|{ohMx`QP%fo`9C-tBeG#C:pTgؾ%\||q{rG7gsñnpLFC̨rusE(@2)FltKunWrv#'ȯ>iٌX6V9C唙`<m\FR#%LǓOV҈0'D ߄K> X1c0e">@1j0k. Z䢤gH9{HD-44 "hāNוGT=?춨+8@Ug`㘍B|&@ekOMU?ƣɴx[̄Z\cŻ.¨1 /㛺w|A_omo~y)yxn~-~Yx4O(ۇG=e9xx.5ث˦ï:n;l^|0a>Meo_-jVc۝@woi=SK_XQ`)@BB]]UQ&͖7o U1JEF}Qu/sD3F `PkK3R(o4XJt=1ٰel(ܘrhߩX? 
PkTba5 k3ey7[ۣEn6QmT Ae؉E1 ~H^ġA0(u s(Pw::p-Z> omoR@HB#`ɺL#_ƹKJ%\_p;?w& *9K1񷡞,1wruK憩*܁ctU\f4ia[oGz鉶6٣0|{W>_R(Zt%.t+4:ш151<"Xdڨχv t @$J#-,mL9sx1& Z" g09{tL<KJH[.LH p; LH'M`Ȁm;$O; \n ̃lsKJzBTtiNp Z%k%ΈtZT,W5c|z CcVJ%)MTkgqɣ,"\L<\E˭5kr xEF~gY{};2QҍkTV^ڲZq^:zŻPE[f<ޖCi;4xIC cvYdyP(DsssS7"  [U:H†ꢌDb,DVct<1yG&j_b7Xdp$:fhu۩(dž6DLMJ]^nQW8pyH4B`M MyG:5V!,FFC4gu4[mMr=)ZKfAǶ-kq+|A.}uDz)ٯ8{G,>[ݻg 7dzqH~#!b]B)bcH9=*Z#Qx-_Eƌ)quhc>#Pɨ HM 3X0>8w.~!"VNyD䤎EvnE#̦["fzJk5/ox Nh5bc\Q6ZBIbtL$Jx+2>>b`ApGD Q͘\Ɲg_r^RxRʍ!&*/KPz㔤X)]Ѱ gee@ `H4+LꨔɻP=MBs OǨt&akXr9{Y-W1utQd rA\k[&!ͬ!қ GBY=z{-*=?AȹʨPmmY>eZ &ŸLj6DkNEzcouI'$X=47ǂ&ML]~ Lrkmȩ],!?:Cҳt1:avM j,١9dpN69Rlʪ95K& /Ǧ)!5GS0h&)\}ުpުpުp[pRx#VXyADqL4fa0,7`5\|⫠ъ~%rpY[ʅ^:י9r3<4ZQlE^0_-kO!8E}$<3'2 #ƘC Cl e3|҉]۟sp|T`zSpUOhÞOқWa\àIʨ+6Ժh?Zb0:"LJEM,)f,Bu (VI <мoͷ|_ L[w]vk2LdT`=71oyXp'%h[W5mG]9?sc{FZAZ"l̛MÌx㨨l#{[f1ACIh 7eKGe<wque8瞣dg^Pܐ,H-EǥL\r.ށw*rt5rvLDqѩxNsI+Dj6 = ϚN`1CĄJ;)It1-%BX@keRWDHIՁk0l>+^>rL2 \1beAFqѧmUIcfT?j|Wt]я.o.&.()zo6W &FIg2=\*oX:钵Ki㪨Í㕡ULU)Y%5HGVrI`@QjmBݚIښcQ[3pAГq&fD)ڜ7)jsR9ۑRH- B`=>)(*6*q6y.ӛ).rD?<M[3s4W0gɔ2LZG,kf]62W`B4 Q0A( ڠAKx]4l;f]%t&`hl@ZlG05`4Zܱ/jʨ{5c 2TpnfF B&1uܠ+eun檬,d $+!šjEMc*u1Ȑd|;َQ_j]c*x0zl-EeD="MALvwRx0QUlT(bIӦ/l`5Šj $8) lh*" 1I' $H9qeDFvD,RiCuV}q*"1COəm* !Dqŷ% .KG#Blz\| \<<⡬r?<|[Eu$yG>?Omż FOZ)}m]7 ARyօs M68-bڂBEM}g\"ܚ^m)T >9xɤF7r`dcɬA0-Y>cI2՗ɋ tHa{F,c4 bl9a[}yy (5Ϸ~,~G cdSFM %Rm%9M!iopL$ô*JaJW 0  H0) @s4* ,H{Zh$KqUܥr=(2eg5`܂cxJRqT"[bRQKzq'4:M$shN_^1s7m Kq@Lo'$J7jSV~p6h} "sݯmxm!U=quիKTlac{&A}=ړߖ"e?&yQ~J>ѬS#iI5#?N$$kxFq<$zMӫ՜-c5?ٗӯ:I0$|6SPSa^Ѷ|z<.{61e5zոm`О>ʗ'!ެ_KXZn|ҏ#h'ˏ/ӇsӹpԦklݿub''l/Y/a!pֆ)Ng6;)T7G$῿IHeIa뷉nߎn<邈/w Z3O~L.FO!=(ͺK_5[x~5;[źr1V-1pGӳ܂5 ǫg~Q-IΑzxaD0Vw,?lȌRV]D-b av,?{ƍʔ_69ʸ}˳Dwق:t+?tB!$]ɠ21g##༴N: )C_Q1Ad%ֻ;ԛщDr΍eIx6\$8Hu1"0Ml@F *i*j"p4 >k^>rLTBj :k`eE&fEM'eᝓBT =wv/NjCOa;_/V㻿 Y[bQa.7Vh{jNK#J>ז_PJ!h&Bg'd#S 2^N7xʅP7c$ @̒ @٘]ѧ9+oS4HϹV>HFjlFz\V[;bdupb=>֪@0wmͯASݓ?o0h8}-\bZ+5e3Rgp9̸J̊{>HHQ`SFuf,KʙL8BeĮ&fq\31Z6ڼGSR٣cgdjɹYtV *)Tb[TQqK @Y1,db\2Yš!J`.!fQ85ꗩh 0 >vEDQE="0ݝ6 K4OVG@!H6}!KH`AeJ6NZI"hgB *d ɒJoBvxS5qH#tQ鲸>:]qQVEbx[R n* !Duŷ%\VlGR8!Ƙ![dOVǮx* 6TLWoȭk<|"/>Y(dyWJa(€s %8# Hg{z +VdbڼNq`prD(u˾~V殉w[~CA",_zkƠ8cjXtV9(ec)SXK e⦴fv~X1v{kHV-jEj)1sPv4: Fqx1,3ZUOe-/q =0` Vip:6 @*0>{_H<- !}#($ߧtݽA-fQ,C":X }3%)ɐ+g5X֐fIkδ6 /"x{~)}l|&h.Ac.=Z msy-l]ޛ)mj\||S$_4=x/JX!b?cMGjN}ۖ|:7t ImE/Eweܖi$>WO,nԗ0rЗ Y8M*u(;Ӻߎ踊Ⱦeu÷Kkf͍TLrn+N#Y./o =ގn<϶ Z3OOKοy!F-~Ӽ-&<<.~,Yw➉;}b~uD sީxmN |  Xѯ婄 Zڼ7XuFmvʻl>2YbN=-ŀm^;u]+k}7Yrîd>6ZYZP.WG 됣.uD3 -]x *Նй{]8ϧ{%$?(J*g3*Rd:Gt,h,,!YAZrܱ,!C߮++`JT[zݺp>&}P*ǥotܼ6qbZy0Z RG19 l KI{J'?Ӱw'I'fD)C-d.SR0gVxc)C2 T0^HgB >y=CvfAz~w^;-@keR@*FEd6< L7zyo yErWUݧ{}vvKo'UI`ۮ7syׅUbLTIyVpm>=g{nGОS=,΅*R?xMu2v21"R$g':I1_ͥ  Hffu_0Cj gK:i.RY8%8T N&&/st,Nm[M-M )]NϳH2$yi槓sLc˛3vQv쮺O)NdzLBF&M #e@H)DV!aN9P)JwYҬ=p(' J^sǣH9st 2$0J G.\Vh{ލ,Gc֣MAN AHkv sDsƘ9DɤQ+բݠskkBmSs?ei" kVˌʂ mWrnI#'EHFV{!Cq;O'i\$"`=w+VǼ 1St sg/%\2t9Y$K_eo@kO4D#iJe*2JTJrP /9-SVWͬR3de"FcKrO,)$ɩZQ:%N6J$ϯ< {Z*op.aZr4k&oiy"$i> OISeyLôuݩʕI,SK`tS9{2͙%ZjEC.mf[;OA{)m!J+G٣=-Z7ŽFnWi/.r! (y%pGÓJw5kZj/ O^jrGibk$;GRt #9byJ>Ũ@j}irwU͇iJV-!+J@4nfԮlAL-9hߦInQ{szgػFrWAbCk p ,><ڑ%d{俧zX%Kc-nSfUY32ڂh웖Wo#1P 1ַ+O'!n 4b+ү56-jD"m -2x(6_b`ĜFA{v>P.eA䞡Fϊ#H\zX=K $]J5qC (bf@dӥ E%Zt;JV7Xƽ9}Zl>sX⵭C=fIt.tl#:p)rGE*Gfx'zEqO9$Y/L6 "譈Z^pRD)e& aПsgƫ9lt;tYSB" ^X]W$L . 
,sQ ů&:82I]P!w78|hs;TM:Y%=MfӸ*c3:94-9_z59+Y`4Fы̑2tZ<z1#糤܏mw+fZRQs wauSTOMh7@} t>za)x0ksoB *GVRV[ʺ@(+23Ymie..rTi.|<^]/GpwޝoJ/ 2oO]No4- nsPf~̖>xK1;hp شeܙU%77}lzҝħ|T٘L aPÚrh@_Ԉq%=j}͟w?֭o-wcA˷wAlya:?n2݅˛u?|[+‹9PC@C֓yIY;61/~"S Zmq D)#E5Y[@_إ艗x"c ^>( )/E%t5K> GᲧ)xL$NHƓ(9U:L=P/UslSngML .uU XX, svJ0>2/Km0Xav!Tt{Kk`Ip޳{=TV$\$," yM110Y $ +!MIJg)4q!̶OfC0Lox/g$Pp\twRhڭ/m{tt tuѝA9x]O\t-㎬r1A1T20#S:GG6*/umF O(MκΚja-/~Q&TO^dڂVҐ8p!(4,Dcpԙ8F[r9@K@#h-x$E  y]}Jh.s7NwH@M4mVGvE;(Na-ͨ ]xڀr:vLTȊ̒yEdKsF{ o: V,Xd] t&ӗwV tTCA*5j5-Qcf?-; bKDRK'^8%x#1%\E:YΓs &rF^<)xVY&L^YELsb.z-!'):^CL.xw [rtZ4T 4x¯Rg3$48nx*=YLhڒFf^40:)4,6'_>֪?6lbf2ƓM}#vuWzIf6y҈(~~Yk䇓{mx9JrrҜ|X^_slcpo&y奆-V:w6"Lm /\<{z.腵S?$})+&;@îm5aqKF3m42,5mg 4ӚA?UOx/st7Fu>5Oi] /{]IP pY7;mk<%8XMkMsG]V?J3ͯ.٨kKVI)EIh~#iwMB*ιhu!G%F~ŽRS$~s4)hrw.,ƾ6?>V)Ahafp"YNPYθJ:}$~I8$<$!<\JYD!\ټN@wmַ,~wG11 4;!~)fQD׽ 駘>wqKhdA8 B,@,CAW=O wJ ^b 2!NPp/kތKuG)8nKx Q%c@҂.*0udi2(MDB Y;g)&כ7>i>L !vn7t0&V|͟YxMt[FףΎ79J%Z@rHEG4^'WIKe J*+:p^Rdx B-1 fEJZ)uiGJ]t&n ވހO˲m`uﻵ>z|$zS#qEYvY=#Kw%W`XI)e(jcUFBN_%+눎GykxAm5%6^Fi(g9VǴS/ܱ!Ғx؀#~VWo'tzV_yd_::[u~E[mY-Ժy}3> &mmک ͆kX>/Kmp͍hwޚ֒VYT_mWkB_MG}Gߚ"^q4/46%WwC*ҲwN=ԦOdp:=MjGfQ(?:n90=k5 ?gWiSNRLAPU"UYéem,Y/)ٻ6%WN\}"p{&FDʤlYË(C1P1,45=U_1ָ(v ۲3nY^εjJPrӄct{nSu{`tRhK&,:f'NҨ4\9b|Οh%HLXh L H3"xtȤdUh:9MW 0^]\[s'k~gG\g?|MwiG^o5mlx6VbFk2<|[lE,Eujg$J8 1( dKR&K`lPvLPnܗrP]Ä'%'ee7p1@㈎ yx{!Icϻ@Rq[hcy|_xjL0O(`"hH!#u!2JE]J9e^(z{! nw{lfT -{KفoGy = z!qX=?5J*Wl5דͧiwM4]&|Xht%ikYFN܇**&Umric'y6tM-_\igMsӼ>ǟ{gzS퀈7ˆjwׁ%f$zʖ7^ Cm(uR{R=kS~Eb),exw^Rأ!҇q<> HX.8}փeHu@a)L".*y QީFHq/?I>sXzO~_{VQAz{ղmN)?iUçJCAMt6>rlYB/ȡbpc)dHjmkQ_(Gt"[. R$)rf!Fz>A9:EzH E)"Zg! F" 9l)*TD|(eI=ѥZ L}ze8W|?h~ M>ur‡N,:J U +kpj.O!_K% Z~F允3F.C}E磨}}o3-f,ˬQ0O- R>:kVg&5?l3t-Ϲ+NxvK`|N7oR!R{ |MdMEGĿګWƤ&}[ 0꼸iRqlWqCOWj-f4[Fo|m?_Ο 7G 7وBŮ_:HOϗ6i /VA UV5[А[p7=i_W+eɞ͖u[߻7EM4󴗡5~,n R5 v9z=nߎsjvȖG+n=|}7|9`歑Vjo}uyސk%u/?zī3wǎgٺ=6Ch٭n ǻwoO*  L* oppz~gyޮ&|~me=Jup>[δX4%g@'% =\'O@Z|fÊnE5Z Fao)H%0lFk%K, K 8&9d19JAAS@$1$;$"&%=y9 F&oBNR Tj(rG v#~T:Q\I+ hL1ڃ$V-4@~Dc:dutm M2Cic}uܴŢi*Lg7Y\WOe~XyM5/Q_O֌ugW?5~~U)yyv^=&Z#j/ge[0iW~T9.8K]ޚx{.eī WcϾ ^~7}}H%KEOVgzҲr? KWUbR#Ek|¯45 fKt` ?Y,>n50 gKeՌ Ә+r~qlq@ݜ|:o:oBiܨ`yf'gY-Xۦ]B3ZnfODv=-*a"v %37*ib/|i0f0P0Lpo0M⯫bwVJ1w+Rr3f0\Kɯyo$%gS0kʺE֢(q&fYxpk%FTθan^iϬi1/gl,>ހ^6;^^9Dyȝϻ-a_\/m>C,T|::Т/2HJ  UĬdFG'aD9 ݒҎӊX:hk;_PƔ :ͪ")EI[dGQr! 7(Y[d(`7#d/RVuXVW9}U&Wպh?y?D«3AG-g@}x Xd|zJC ja 6P2BD҇,4)@#$k!* R1T8`]&֪ǵH"?Y ]rx0rSww=b?zoi X],xs1gGgꟲ3AmpC0RU3@A8ТL)*-)-8m`Pқg SaN =\A)G)[ȵ  ,1'3TX)P9't \9EɆZ?Y=!&F!M:X E`dR|Դez-ef]wlܹX@ K)RlAЇY0x% $#R>k#{wV^rD*H1TXQG/\&Y8-#2a2dTkgHU#-`FZɁ,Z9(9;_RP ,gsur Qg1*_>w:NF&>Z ޖX)% 1CVAR0z!EKF}>%fyu:H.)> $ÓAh2BF5ԊWrԓ?>a2\NƹOH/(yP@v*:RJFHi88#t?N5OJ6kr\\vx@6rL&Q`9HhĒbU[eYAX2"DGi"´}p8E$|HCR4RUCYF@\xwI&Ԩ [DYS8,xC%vc6UkOA:㐇JX:_ZnBEiyH T}Gv@`g 61<$MRh/qcVRzz|HdCvc\HDYbVn2*>9!ҺdVjhu :6BH)+#?O~ZJ^R  V7D]5a|zX*?39CC'ܯ/+"f[vq ݮS[|-luAzJt[@DI PЩ(B , FLr 2g-J*GEԔ AZjg41%᜔&ְ#J%оȹ_w ǵYl}5j)Q|&-(wr3wa)(S4Z +1v0Gk>F1{XETJ*FFIqPTF(r! QWteN$c:Fe!ұ.d֫ EErP1佈0ZCb9<TZEL)wm e qk#'X~J)Rˇlg~g/CRd˦SuTMuNz0tDf6*H8EP"+GB=Qe HAVPFӊ $\ )}(ˑ'"]"/vЇH#uM͌R`2Q  l$K~n,Y5\v/ KK0*i[s,N<, !6g:ܗq̩~sz7OWB.idʓR J!Cq_;NW^_{pY#†c6Tr s$x;eVY%A -)cqs4< ǐ^ax屬hi,!HkԊJ*͵(SDct+mfHFd:d#J3I"Ay]ywʺk^QS}TkAqy+D: RsLxplO`;"|o;V!U eCuLHҙtGM#WAkHHyiN h6 Px/R2<7(q#Xe{eT('*ӵe%9-<aR ? 
(c{+\|yۮEᤈVqzzJuӣE|0WZ#Lj1KQUg][6҃Pƥm N[*nJk1O.V $ 8F.[fEh9({ {EUdlA*aXuQ7O5rSZ_yU]x>\4X&gk&]L| bw~Q-Ջ]rL_\|qa!6R#1~˶aH0J,,@} +LviŘd-)Q <&FmWkSmH4tRY ~bX±xTL^wԗ ׻qowŏ^oΞ|w:{Ëwî7XIp1Zbbsd:ěbHH (Sr̠xb;:`& MR0)GEo_> ̂HB9s ˽)YQMg@$x#a*&'{yWYq&OՋW7_K J=pqVgsOp<~=JQy|p b9p5)e)Nr` a\P"P2zELN-&x#32x5sf"Tnd6Ș䫑K2Bb!E+gm2M̸q}J=hi욜nP~O8bӀ0J-4A:۠Bxj5 TUB."TϢB !{ƓP` ū`!#a:0-"rR.k:~<K:EmqݜhFGEPcINK=QS10R*f\H☁ 3  D91S/XG M٦+>P|Hx4<6|싈$3";D'jB;D'PY% ޓ `'FV{2DRb76Q J/ gN X҄Ij$fX;v8tvwQ[q(1:IɾH3"pM#Sm%X^[!umA@LQwG%clұ/x{$\V{N5\ĩ k6mLFT3=qx=(¿T>C#Fm%TZ_:(w ]7ZX] CNg ~OWi 1^(^yLX,"gYf X&HqY ~!Qڔ)s肙~{żoA&vSr\UՔa'ܸJ{mϏƠ45©st ;nnjpr?r0~Aإ"U:@Ke%"H1:hv\NA<3M.|=!CCWZNR4{jT{IPXt* P?mHr8qI ;ɂ<Or~#Ig_9za)8v Ĉ#(]d`#lrĘV3M)4!E:ϦGX舔RUjѢ!m)m Bôwd&ē7nwн 됎y$ T+AWXQ!0U*hg6e{NIvMlM^w(~'7 '(}BԸJ*4Rq4Tůyp|R5~v;6nt qz5kx} Jr:+Y׻ԕxir0Ұu~~ޠH(^4?`0IW}3dk̕jB,3iO+zb[sXՎ+꿨%_UBf"M_؛o3WߝjԻn`4)5GfZi]wmn6q-&>ka XuqgZY2$rx3:o;ޚff;g%YmԽ2jG*AEnRhS  V;rrINky]8G%D^PWe^sxF#=cdfGy$s锍r#SsE"r)ԿC@/IQյ&_{ݼ+>&}PJǥo_t4QTu:zk'QWd TZ,`ѝFOL;1L^sXs"1@l4FҒs/j4DR C( y#!w;ҷY}gk aoP -$rQ-ckO< s9"l;6Wkͺq*"Ӱߐ=޿aکvhJГT^<)l2`%a`pT*V!}Qla&|jFX|K>4a`h>7CPoS(6]9-;Yf E53hQ5Хg2 ?16 =n6J$*2`*tk4mڊmk/\^[qـEauwuS3y{֞MaPp<޾71Pa*͕jQf1>%ɉѪ8cݒQM%Hc[u%RL$}բ$PƬ/SFS**Z$5'c'TgK4i5khiOZt!h#ig~֌%)m]3BJᨨf*LF'()SS%*D sNjBؔ L}SV4>ܛ2eQ}h9(+c0pf nۋ:L4Y֡o+Sm99Iix*nJ0T03S| zr@cVKk޵,lM~%|m=2`&aA4R/Nb6Yxc*l㓂Oq֧D!۪yx\& Ϫ,U%X3F"ΉT,mtY ׂLQ 30^7J?gZ6iⴐA37h}/&("+fcDK;L')Q 2ce;fؾ3q`Zj*hږYny ڦG1^-BT8t/TYUu rrUe ݇)(yE nXP|ѬƢN k A-)BJ# I"92ʘe(& .LKc4o{* ݗTd2jnuě!q3, 81cQչY,TGW>^_ż3roEdəPLp)icU0Iv<\-E`y\E4Χ,}Of_* X(;Kb f R;ȋY B-B @1=nD)(ʠv?b,C)9k0 6#*ZN:G!v,p :t kRg]IPSRm>Y5H#aFVȠ0%@" tdI..5vEQpYL$EBi9DHT @QUPբGE2$c;ȾJ$@ A169M@J6?G =]DP5VY]xfP}K&MVDEKUmXH@$l@0U>oߦ=KPa2S6ѦicA[WvFroS6f^}̵2Fcnf= kppS>vQdf1h֘U}t1>옄CPF!T$ 1DЅKϝq%G,X2yS{ẖ AyG[@+TQnԫ/-(OT+Ra_T05&xXo})Pihk@]n=F4c@_ #0X?$RHC\ kI X4@" 䅒I &bI &bI &bI &bI &bI &bI &bI &bI &bI &bI &bI &]!C"-z8$̵@6诞8Io1 $@L1 $@L1 $@L1 $@L1 $@L1 $@L1 $@L1 $@L1 $@L1 $@L1 $@L1 $7KiA v@  k_; ԭTIo+$@L1 $@L1 $@L1 $@L1 $@L1 $@L1 $@L1 $@L1 $@L1 $@L1 $@L1 $@. 4$C"کD)=־|$$зHeI &bI &bI &bI &bI &bI &bI &bI &bI &bI &bI &bIopi=Kz8/5׷ڝk-E^_.PˋIOYl⪤ѻgVUycݎٸrzz?0m\,d7W˚6\/n4\{ u3]'}SN1 hPb=]J^-:| ^ԋMAsx x޿cI-4=,3?.6,lZớx| Q(TUiQ & 2uX^|аD2b_{O{?u%JG7(tp3w~d=@1ZbM',#{e*eaC" (N/QeyFkݭZ~(v"UkVdl`α>V,A'S6׌r<2џ}4FV uL6c*ݸ Uƪ[f5%a-UQ\Yr,o<ƛG}眗O_c<n޺~My ǻ5wyN&C$#[Ul64g|=Bc_wZŵB'"w' vynC?lvX?o}J~)a(7-PVW"_ H+T{D fƃm'bzi9wʒ>]8P\" ,jO !/~م_jퟣnY yWȮn9[_,.կ-뢬Z^G?,:wG>xGݒG]oj^\+ge 5 [}}2 \l{bsi<˩N_):lLwlLJqY`1zƞ:dGwt\?OXn)T?W-^,_͂E5K=ު-6uÈ/YE0EUw~xUjxq6˓81r)?ѿ+eݰN[c#LxA/>Bcמ*'ryRN][o[9+~]`]7ӽ=/ YXhYr[ri`u#ɲL9:,bNdZ[[͵~8w܎F _MߏW]{ ]~9,,r3\G Qgգ,ThGQxSbed솽EG#^֞ kx2?-<֟3L9k6ksރ=Y<2N6W0:Ƈqg(U;{,ƱǴxg=?CZ_$/e:/׳f!.VG:ucs:!u)&Ae==#t}7>܉ؾ wضh>j;혁Cm[aR-xN9B0@,PjTr*Lo6C,ТcP@3rS`3>3މw~T+݄ h"e  q*Eëﮡ[Y7Syv x=.L[Imf$73煠JЇ= n6Jl;B8tI9i!D%y$8$OZps-_D\(j'ok %/t6@$! !l*/G S$U[۴"aVu)TۗAaw6ROePl/T^lP5qS ٤LK"&X >ffb6W!⭔'m}^r O{90٨줊R ; Ww,bܬڤC5큂"Hv6Y3r\F˧*2+T1Ս@шZ,֖\ՈU @* !U{h)?_( ? Dx4i ɼZ'Ub3Q2A Z͠VOe',~{h0}}dt%Vd)X)ZFpEzЃRE`D1*:Rj b8( Mo6fgַjwSV ۲_ɟ9w?1AI)2RdE`LX4jkEE0[R.Fɽ50m7?w'"|X(ỻt><FY=4K2LFlMYVϲP[!/Q,c{h'+0Sfrt! 
4-nq Iqg@,dJ6qCdMf$fB8MSCסAI CNSݼHpNJkQ’H3r83xZvax;|Gǯ=˗,5 ,]U`Bb`ɋfJ}vG%f"{6Yq:h(fR*%gW?# $8(F((Ko)+)Q_]ӕ6ĝNIF:gѕe-|)$&t.II Uه"@l* &y/rp}!ۤr1(`mEL&;dG6ftڂͅP sˉ"}!;#A F`X R 6fJ1Iڏ4 pt=&'_!ݬh[s*N|&U|>C  I3u\gpVlEM( v(!ʔp!g,#i"q)ш 0DY8m` x']I^mɸvḕ6# P^abkxFQA h9 / sXŒP :.&(Y:K9J֮o̮zQ܄ET p>=VѿzGJ+>]Xb`D^ejJׅ) #{`<)J=^[|1a,LSL͠sU>YϤgU%z h R[A:gx P0Θ bx) F ?<W ƣՅߺxzpv _r;77gV#hz6+pvCn~7:\{ jXwO ~xN~y yߎFx=ڋY  [{1/-Q3gvDl{ uKi;t݋yzu}Art`8nwT756jj7-ۊYk(5ޜݙ5Tϯ|;rMp՚`TwUaڜ۪G^_jq45F#i}]xsMfi4(^ՒGÏ˅\ &!G$wkVon[ZH0|3ѷmk$B<5=V>2O4`4|ח̎?_Wo}?ҿ}_Wlu+0̆|׼ZW_wJ[>dhr֋?`\mZNyǸײ7_Lx];x_]F2v#E L=D k(seQ>uFҩƷ~pX*c/nj+Dypɑ12' eMp e15dBYB=_cd_ 6Ϥ1;GzҫwaU2;Z;0x݈x'渕M_1&.R1.D,(1ϳ/{|SHC+290^ݎfVlw+#f_>j \/+-^ :ށ@D9-B@ U=T`p bi'T{!ۆ2VYW_tZ)v3V>\lU- 6/L w]~n~<@4malJ'XR%kB_h^G!/M k 6o40x;m^iYh_vȗ/?s+] hVFR6@Y.Rw1I %(,̩StEKrxt90WePe`WwS)t/*Nu# (V(Knec,R&ccSA%VAd"aYj)^Fߐ"N,$N*6L΁ύ9tzwSa)Ԏ,qvt5㓣"wQ#O(9|mez$<9o-z?9'K-ןu8^kR4!gA{v*l5+ThwD6Ue9,!IVE!Tk،5c;ҮGZ+8R*h Yuኢ:mE;Z/Ί>c48ɋ~C`25"Bs.(LD*e-)0  um3CWE2Tg/eUm*jx¾P-' ;ߺP+ri< dl+8VkZ[ZG+c)1xɎQB8iAcVR،Je09mEb9y#4{Ȍ f/:+rk [Ig AT+7#a}8sRU'c[DZQ6ֈ׈Fܛ'$ٺ3P:SSe8>K ^$F_T'ހɴEnNoLF+JtZg5Y)9'-S eʭss@KWX/NpRUcz}/01qζĿ$ ?{ƑpKCcnX/A?%%)Y"U )Ґ9(Ē͞~VU]UɶEmudtGU>(Y.B.:}"Pf.AaN=ǀ{z1ERrgRYPT5"Ptd2ʜLJ<crѻ`?=ۢ7&GY)9F{2㞛T8Zk;R}tvPQGN=TI0Q D#$Y1r3r`Ȃly"9Hh $jEYE& Kp3"ا}KI IXZ+Ca8ұ`9z{L8)zH]l`{Jv@դ"l^S ˙?  .Qz4t@K;KDcur$13X|<xzi=Y6#*RI&ssO Jc/ K' I@ao\چYt"y\7J,9^a)8v Ĉ#(]d#l?1CfDS` B]ٮ?B"#f]-{v[pH`hSzvs`G4#ԅO)t 0}bS1%HLZ), )xT(EHB"D%!Nj]\¸<+F*ITN{t`Uoٚ/WA?=?7gտ?? mF=R*HCWJۚ~,JσU6߯xqۻ ]}~{j#⿼OR+~f瑬.W_j&v06њAx6okqsjT9A(%}{_۾ߢUіWb&WWu6sqrˇՋ#ßj aoP -I$[ߎy=HZ')礋pp;#0 lW5_MAK^f;ݺ{N=uޒug]dg'c3OW\YdڕOc X,Q*)A ƙARLC~J=^;cx$ T`! X.xqZcEf`ܜ-W+QϖiZ|qi?|bnUe) Sbır;ÈnJpQr!stt5HD? x;O3j?ElS B+V=s> 0Kp%Tp&wf2Z0hX9 Nɰf}j(ң& p ,ѱou 4N"uʕ\Pb7_ y7:Ί9DIqƚ[(C>>k>,7eB>-!LƃI-ş W* 3R()vz-,ڸhN?'#*)dF *|,Dcr =̶I$k-# dRKJURf^rIp|`T'$w|@Jfze*i@,:1D[bҥVògX^̳,o )YzM!Ŕ?6k bE8ԵEoI8}iy=ퟶ^рmP0PSS~ l5Iꨂ -b4 )TE()ʩisnnjA& k0ux:~ k޻—OmϿ=F{ k[!B**c9#aN`Hyvy+17 -NEΥ- 6Pe|mdgpt0fsz6v紻MfMT0d}X:ȡ)CQPJ~קt8Ua'`zz5:~Gg .ax J+$Xcԙ*. ^/sV<܏_3;E3,bq7~1 XS 9Ų=TL6*uHu.̴v\%3@T)  gZQ5?Hyb^߻ pOKp@kVUg;#Xv?o( |Pf 4V5&۹'guZL6,}ErM u>$[b""k˪6nȵMyRʑIj,lAw1ilfYS.!vi[G^vcXW].x6f5/\fn$sߴA o^K۳|Cō_< NfMKsYo E{oC$y;tHvE;L=Mq,m#m-ob&ue++;{z/}\>`yk; "ElªZZg"ӈnX^+k)5A}0ApS͍QmQ'(:~X|+R*:@UgRa{bJ:Mjn)ԅ'")K'*TR[ju5+J8T #qY:2äƀ`6 owERh6|u5!cFY;p%{qo ÌД{ t֜-P[-_ALr4d Y[xݧڧt[qF"[ڟ..Wī|KM H%FmOq+O0/>10MN ~Ϻ+Fm%TZ_:(wzu~",s\>& D P=89֋I/. 
+fTPܠduJp#J.߽v}75.XJ0pЭ" Ҍ>(]@D}MJ.G~;ӣUcQ3Z~I8b%Sk:i'zn6{-]FA2HNq58zzvݱ$4:.}x ZKa)#czDT^9E@lg !v;ne6)'6īC9j\P|M{Ȳ:$Brfz'Ri}eqC̭ ̓mTѸk=^jP\4 @ a :, <75_K&b{}!…ʴ 3>~Uybc% io/F)Y3ح= .^rA f0#Bx_~LBg81R0Ҹe\DN]|~t,ج(L29 jnq8,?1ܐ$8j2\?SdR[ol01mY{E ~]%dYƷ:cjl%N{*e,3;Xש>Nh$ z{Tx( uj.ok鱗2r@ckFBRD4!DPLidJCoIN5f^J.w-qkEywYTw]lrkhAJGEBviCĹHq$ɉv @Rq o)Qʣ rL@^` h5H!E3BJhKG""rw"YJ0V633Jb,qJ"buK+"2D;x$HNH|@#qHa+PvmT"Be:zi]q8b7'p$ED Of쒎Rq¥^0Z9ePJ詣J%ˌMr+ P|Gqc - a3($V^=0Lmͪ1S.L2P*-䒈E-I^/p:ɔy IJ-@9`]J!(+FUBt6@//Nyr:F#bP܊cW *n̡xdc(b5~LM?(0_4~z99{"uGq?i,ф /.]X>>_qi!N3|Wܟ6`Y~u?rx}~R3oqJx}Q\ٍA\Re&HoU:A8QLsHeˍEa#4&X `+VpXvǀV89W26nX"%re:x^S I-DхRǩ-,r`-+$U|';8L&N4H+d%,ݒqbQ_=XECstLð COX 4~M@chRkFeްT"AR5eRx|±"V*vcmw`% Im?nH|l7'k)"!&Z#)FbDM `?7EJ0u8U T2*^VOO&CEs]A# ;MZhn'w˿8ǓR?NQU!NNէAfNcpx, OؕGEoG 1fG{?4Am>|섯ٕ)BH͜CS$+9Np"˵-W6BG۶$8ұ$dN4A1I&(&oY$}nA Y*B.W#$*-4ԢɥH5arY\$DFJ/%7Ļ;$(+S{uj"՗څE|ze#j$1 +WzڤSq[Bx7_n#+Ȼ_λ9Tgs?usJB O`8A?=L`"h,lC6~t̬'%>ѷ}Zyibso؉/L~_4~ GM7yiw5Y8mŵl\(a#U=ϥ ɩ:|g7_;eK嶢&8_ y}ǽ/778|0v}_o#$T2${];GqEѵ% k{n3j\QkO5AˡP*cEFs⎫`b"p` YI^PbM +ha)V!N 7!& c !ˆkm/GMX(X,lDGѽ3x2ô1{Mwr ïFZM{]k;sM4ڵgHE?l݉5Kıg>ՖMh7ʽ7|-`F_豇6q=ÛOhǤ}7gT 9s1vExײͮv̭wv¯Q}.C2JYi¸K F >h2HiUBӪr3qb4 57n8{4z2QJ3q-hGnpG rO*4o$;X{nsRJ;wnl!=:Hxeg7sjOTȷPO>U /S Мvmc}èaMa/yOn#\”ȳ1{ómyͮ,miC C\Rz+ƶbo+~,t{apd$^:GBGHv>!=)JnKzh kaՊ +!h˖tt 3'b^uax+q]*d>^d|-F>S̚n޴n|Bg=&8켇0;?9jdIHŧ{7dPdnO{-~KpCnCN<:@I"]7/ Va3,tˈ`Ϥә 6 V0^\0Jl|`:NS$`rE2|2a ~i\(xiN@2(Ss`eQ9DɠdJтgTRAp$r9xP@LgL#.T*r+@kGf(\RKh)?QΆ/HahZ2[ሥK?L GAh3qB1(vIR\vZh0yv ScQy+5Tēg!8;;<.'OEЯ `"L_6',(+RDQЩH$.M$qVfR)I28hbVAx[c|XHneFL[N] Km5,1*k&!.; 툕ce ` -hZp$8$"P]ԅ$.irB~岕R5zrKL! ŸBǗ wiV=9zg*/,yMJoM|dATī@G6CpU7o׷'{7.雞Oi5ni_Ҫ ңqdDQ\Qx8L"+AVG}! pfȳiU B06]^߸oIhT JBwOPuF-FyJv{sFGTIN)(4-{q ViA_t}w~C† _7ɠOﯮ7־#(Dp_(G$ENWv:a6\@屓_onYabu L !֞gFP.4edw[L@2jtT lɈ6 L!*)H)r5|БV嵽A-e_аJo< U_6 ̞ŒOT/k;[\iBwNbI1 Nݗzay }+t:tsnSYg/nEq6̫McTnjUq@[`W~̝KkZg'Uج{⌤}N]W^Wƨ=MEҴQQ+Z\a%qE0HUި_gDQ9=Jud5PQTpoU.q5\zfR9]{M\v`^e%`!h{:ӄc9N:+ЬŹTzNԿ3;ȥBŕw.>Ebî<g{7os3''=J1r6n}| ma&TJ P[7?.~/ 4)uwç+F5¡=0+W 3/Hf [RxcxUYj6վdhLzp]0&wW[ q!7mLQ+5i(`GnC eGy 賉՗siS2\ N>sY Iy郝^ūOákTͲŻW&/HAs<_%,<';P*ZoJBe Z5겸GtuFPGӫOf~:l'C] wﲪ~'vXϝ-rH,WHG'0!lP n2 dTolA![~:D`89PkmVs0+/2},ƚP-ops_~zj&t:nP`1gCBreeVJދ'3 Ȥ,Ef8cRdj"@e,]7Е6ϸ :]$A`Z,\)ւH &IO@{+=`./nbumJ nkJmm~ۖ.=pl)&{B3e[F43*myXR@ֱTB0Fm}A / )kIZT Lͭ\S hلteTPnAEq^ z 9&ԗ@@ʀ *,~J!gwjrw"5!UPdiִDB)r94Vp{!`"bKC#,lI0v'I(X0EMK$%$PC :Rlzm TĨakTV\mW-9"#P4+x*X}(W )mh8 pC1oϗ/FQ{ N[EeR.T7p/Z@8R T"VΨ{eީ怳^rFEdU"FsUem xL Jl뾚|Q'+\vP1ng10\zD7+gq7 :;P|?Dt\~r  9d=v1|Z58ڜz5(ҝMAyY3wFHBɸ>,>/yKpYױ&(PBX( uI t!8gI" <W؜:ʁyL+qVfFiPNkJ,!F 3*BH&'ߑ]B(*k7JqjB&%lP{\nu~7>{]k1/Syjd<=;=M>}9sJIHK'2'4 0!d%)P \}fMJZ1>\WH躲c ޽T^lnu͝K6^ӏIghl:ѪN ab(Ңm8^vŧ/w$pPǡSv/Щ\t-O@_J4bS.dv`;c@4Ҵp]1օ,wmJ~ /a13 d7\ydȲGg ߷۲ZbRR n*u!\0_=IB; ;IuP_լ+J[gE1|vXF. ŹfXfy cܙ<BEYKT7b^͛OMV\Xt9 ~}ǭXRƩ2#λ1qJGRo\!N*8yFa"m{չkSNNa; 8=$rZBCn=#wGuVm?z G>zLZNq8SU;RҹЩ64GLeN"cP2-2xߘUҍrpb@^UްOEË5N5!uE;:窡 i;9;b)@ƺi 6dj!KrW)RUܱ(TtUYBD%״w\Si$H|F <. lI,Z54iߟtG"?`P#y\XpAו( %^?C1~h|W~~`J5m;ZEvJֆHj#nmdV2[kȒ<TN*9+qDpzI4fVaLyg&gk(sBo4IMoMOT*)EݵY3y!xn6,(a1 QM3ernV${"fCl|m dTE1dEƩCCoigTwU-89E(ތbM(NT#eo ~H/Odx nGn&LoΔܱ"8CÀg /)?̔v&$KhU{܌|?Z7L f#5{0L %7hyȇ{$FM_ѕ<LS+P V'V>=g|D4EU?Ru|ߑ~=#).NkjT nA;3#׺R-ZI\wJ%'ŁՏnfD68)Tpa7M?nj s:Wq99q@VdWGC yq9nQ஧ktTE[vp> g.*i k~Ā\F!5w!U玗|U3#@gRvd+ucO! 
UȊpɂS L2㼱N:m *tsĔ62` MpɽEZ%)ҲqQe?sq2 1r0kω x^ mF(0 -V1~XLCW5Qp}u˜Iv0p nm%wF)Z^Sr d]+uػdTDz@ȃbouMi˓oŝ#6}D> Q̊5N1PGŗA[ M>bc?-<y &U7^p5 j'4H'ySlKO:|ꈓ6%6j6X q餸L#g?*y#N/ kkuuB֫;x"GyfMu%;AV{R'o6BXvκe+ fDWzߎFtTaq0z|f*W $R|ق"C3x_:R{v/%oqB#$UӚVvJh3ɹ X9x/Q.Yig9%jqr~ n>\E(U 0Y#,:#~3E??|<6`r݌=z#?ZHèK3\ τU$9!AIڋ!eXu oF)h7ck Vo՛8yӟ~ ޅboA; :Oy#YSxi)R1e!,Y<"1s'BWM,0&!NnZA'ܶYSڼ|ٗ_-=0%ӻoRoLһfvsF>N-+9/6H/K\BG?-r2f$E4IF6p-K[*1Fv@>D^ѶvKڭ 9s_%u+Z8)\u'a9E5̺֒]mk\iZg |eЎ6bʦӓe?x_?M__3?nr%~N@xM'SX;v̯~ݟDeOH:V${$>).\J ^dR.~<72߅HڛE5_ef":,5~r?hZI?sͰV}Vϝ9ZL",/8!%{h`ql \|;Iw>?ΰ+=(3$6CBoz c!f] gz K^|1Vsv~Qg,epa> Ta9D`ODa ԅQV!07j4\ 1+4[SԅD9uEhL+r7&b.%_~-"ZINτ}C@0]Y~i  Vkdbr €"#eμi .h*aT- %o{/JݎEMT[LLIԔiſ dRBEn@!lR'd 76_JV]6rF.ȥˍ\Z5'9枲qX P+qaINs0)6#ҔS0KY 3֪ %i.Yr^C{~\Y>*BRBDXѢ΀?XPl|.$4dV _9%/ˤ~Hz臤~Hz藓IŵTyLCpfOQ@?p'`>ރO](IS_ :O{FW.,Ygge?{=MI1%b rUK{8k`hr [WւҖW> > #q^^j$=X5HZE0! ,L`lUN7H=H8 ls9=~FbFhDت|ÖHMb l5O[^$d"!  YH Q' % WB &^jv/K-1eԜxag+PrSC." 9xn'FSbr,4k1:P`qj5X;!P`A{l\)Nj)p p ͠:S6VU)p1c9%`Y ƘaK0!YAR2F\ȝhpV8OqI*-#Iі|}^LJBO&ͷ׵1 0:jš+ $5sv/p?p"n{t=b9xjÏ}k2#QV'-?*=0>ipa{x! ?z{~x2]pp8}Oq6Лٿ7w ~x@&thLǽ`*+L1#[>N}W0nxЈ|pJ&HRFfː$RlNqK| $ CH[,vP \o$ 9E"z/&UދyG/zIk*[<-U[GsvnZ{NQHWM `uveNu݆Mٝt\vhO-drZfjRm)2rQpUf2=P]TRoP25 F<0j$:Fadvy5+CǮ+CI):ɧs_;j!D)/kuM;ǚVIkr~ Uߚ}.)VkgU8>|i|Mhy{SZC $)fأ5W~-Y ,?{2 QCoB}n|3`,xFJ1Zןp^,:|ӨkG~y]/}MׄN]P =[Zf'Pu}H){dz 9B幑$Ӝ d>7^#^Bjl&xipaQuC0ndN2|ys\(sJb0D#NӠմ2Zf6PXBа՞y*mzf=prUo뫠 L&WxW)ӟE2) "IH^HR`HI W8d'Q,U$$\qK5&˔#RqzP #ɪ DR;F$DI^q*1C [qmv,mÆ7Sq]Mfr2&5r% lf"- wpj^PsbE s9ŀl*ߏC3~-Lc%QPW{oP14;MUP?ϫ>ƳOW}un9u鿮D, J^< cpl 7mi/үS\u4ӴN;&R#K(9q=߂ԁ:A8"bakWèQ3 Qktk&O3n,%p nvsv[VwLI.WA@` yzZ NeB_ǁo}'`Rnй)gHXR0$C4d'Ԫ %SO=Zi/ zTm_9pt5KւSA9ai3"V!o2gu"ⴜ0Q&}8.w:7.=I竎beRNQT4oFɌ]9ՁjKVN2!Q,kVP!~Q1$,-30Q(JmRIdMUY* 4ٍ3_*ZsAuA Oꔙie-8 $WoL.eS <8:\q:fV<ծmׯkZB+D.T% l[Rmiƴ0MSG8zBpɉcDb̠.8Ք#fK&RS  ,eTm4&\#7\U̬Iy=帎r-Q,|AF&2M"3hw5aܙiYWcZ%Bo*a|LW;xC mdV% \$J̶|4$oGÎkpb2?$9-dJIT-*+䰵ukхI}9&SR\[n<3tbڃ'خ.#+pDRk1VW6Uń+.1՝Tw5-aXVGMM5(%dTGE}xYfɒ!R*딩:A)*f~*c}T x'][2$CQF7*4[Y_D%pע$Mi} Kqx8xkqh$_VF8WrS ԯ b?XFOٕލKR01wފ24aȄZے M^Ri2cݔcU"mCɹÛO@g1`lP4mܣ[C'L\5W{Xizل4pyɽs9dWB&6(XS홙AФ5O4 ]mM=:Gny$%#52SV`LHiVpi.HuؒkPY"]a (͉Z۠]kN'iް<)I'ݭ6Z@`7xV?=>J4Q2fG>.]]M`d$~?$rr/eʑ~u֑'ː@T*mX? :yPAq9dr& ,/i"p!^K^Z\r`_,HLJŸ/z, qMKUf:Pu#uXѡVWMx8('ю>sVcTW%J= N 1t&U^ΌPЅk{\/,rG_;>ZA l ;n+iwї}p>|? ;t|gH5LIXӴ #Xi^M=FdQr Gx7  \M W'6?oߖ[;_F~M?޴n'{]j44 F> ^ m>]E 0Kh90uxFt:׏`" {&WvB~4 FŨ\T۔V'6LqDZ9v[`w;ω-\aIjԊ0PzjŦޜIRz  Lrv-%hz9M&ۄ__Ƥ)^XϴꨝNܖ[De̩<>&9cģG@8^J>. ʾ6p{5:crҎ&#lDp)$E]@ g)ura^x^x=4;ŇB0,z^QA6qa.\~*aSh±X`َ(I|F5/KX˅d4l-uay`1Uj[|lW鍩is DZF c[Jv K_M cº<=p(I// Ws7-Ҧ/-͸x6grϏޣvj`8/\ٟa6$،I5w(0zɥ#pnS?[*D._;ᜭ8eD(Og\=5ոUK? 
HzRңqy4\r\ J% }")_{Qx:mɊg{P Bd8wpnjW|r Jy fPƛ BdIa= AWlw'?+Rڰw۷=?gnk{9ӎ(RE1uӺWR-?EL1/SВ s{W)Eq1+ۋ+2:sx.؅ZSxAk.bkcM7Lں ׽͟x}sgԚ3mB3kvrYB#͝7 K}c8yv2 [c?W-(.CnDޯ iJ[le {h#tRN#K6HMb= -Uf3yʐs3[npk5M:BlN=eQ(&hZH:%`qAwZ)*ol4/X%!tvӑ&X"ԓTd,lLxz hSv%nɁ#Ľ2HEˊw}*.j<bFؓ !O+OxT 2-+?x, Tyl&f$zU^?p;m`V[֟(W.)g¤= ݧN>;"n'0,PVbM1-+ŧ`!]6]6첉SwgEwA|ƊDXrL@`XϨ-[ayd1E oJ7,#s곥r{+%ex_*{ Ƶ,`a~N&Mok;s2iIW|!e6ȵ./s>3 kلu^: c$UI wнUl[03XJBLkbĨ]ƥG ҝ1JpĨ%F$ Q6J*Uב7`8&zȋoHY=Y$3FJWNŻIiX4acMD**6n/Jo4$cL՗~CWl />SoO#m+Lf$'wʁTS(rZH;ILipwt:9S`+\y ŴW{6Go&7mբ-a\Z`7&*0 n"ꆹ6'7 [] ,G>%+BIjPKf3Sg kKX ԂVǛITFͤ5zՁ;kxir|vL8WɌ۔(Ƅ#$P l ߖ{Rx.B |  A80tcIZivm1JDP0QM;2` /BK+<;>a 9}2Ieu+CUDb{mۍ"&w/͹P$l _f) R]WPW9|Ic'oU9CP>4}ڸ @{f!6W!ŪuYs}}---O_^=>/%Ov,l]|>xE5M`,N3iȌwa hR*m*4 U؀ H0ӗ ~CfRJd$IH _ *"ũԔx3:uO U>f,c/w>ç8M>UN;[Rv[ƙXSYS%+*0 A]_49.U+,Un;ƨW  )+9 6Ѫ٠XL B+rp-8|{vևt?Bsٳi.o,& BVX\<#X&])qF3 q+/QaJ+A$@ Xi*2Hk@1ץsPJ!؝oq*.SJ!x.ݒHz< I)4+js^P=O-nj pM|&8(Z҉Yb\?]Pٌz֏ƒMh\Һ dCSeL~r_ v_ WG:T33B@8zf鮷ߺeZ ;cAR @vl5+a'8ᡈvz ]j=\V ;AczNRZ|yXIO-Z Vwzkw~j๩vWr@u Id\=ɛ@ )Z}ZؘrZ!߳@#59]|i rz|\kaQZrz']G1jyww㼳FT|i~{QYy.5<s=@þ x_1TaY*Z>dDtyqEn^ ZJ֝-ޯZFxP+o2??od^yǘ!Ww##ӥ]/5l/<:.:M%N#Mb^U7ݾTa{6-ȸ?G[`҂tſ~#0IPk+8h(=F:Rt#QcOSǨD#R^˷RՎrk6UtT2FlEhUj=F8w߼I~cn` AxJ8Q x_@tt txy<RisGrq0DSʹ.蜪PTPA˱\׊7N@*A\׫p#yOCt"0Gκ\Ta~6% ʞϺwO:R(jb"=158֮잎vMM=dR?h8Q4QD12z[[nڢiw˷6m₋h%Xέ!I$=3eFfR&Uil4H3qvfݯq\1 oqyDyq&:m#U)k-'0W7!Wǰc$b#8UQY u@'J3.bV!:&(#$&hbA NQ*\3" b0LZD0I\K~א|Xd:w_VWCg29V/_XrW@x|#@]r{SF觲ݫxj4 #z/:5GxLq_S ]\29ՒP:[{WAktr ɚ!Tkz]^&`TTl5O9u0zřåbm6 @\9}ZyX7VsW}JZ`FxsfRf f<%$ bEErrQ Fjy.'qCPw ,Qʆ d9-:Ҍcd6L0>zh6CtK?#B'KA:_ǫDg'(B"_27(g2#8$BjDd ldHI 2T*P1ia*S&$y1EExG]pD*9~yb)Vf֬IB_3o-l,m齹1v̦&Z} Y@1]P&vs6BpnP]&{ٟ3D0ŨCm>Js9[ g/JC/Υ"uLpKB!iϴi5طK)mHSr @`xev9;8엋|"}HYbaLaK,8JʰNQU=i {9򎯀UlxZ[UmG>k$qyNUdo&jWan7 f OFtJLs7yeG8\2/εiw}oӨhVr<=(O2%X% Eh)ZmTzd׿+ >>{*/9A4řF%1g^oʸx݅Yi1(vw?r#1- j I>Њ8D 5R]aQlТ6\MS%%n}ת%~Mi x<rskK_ .M|ˇ({ʇ&/D&x)P#q𷕠嚈R]R!ȽCr=*Р06L:/GBA猺X~}cvŒ۵wĘt#I-EoT [lJ*tqw Hkw+b/FbzϠ?ϥYcQkx?]f8s[?>ӮW$zff0uuug6=eɏiN;C:1&;3u)xm{QcLTntSETDƇ-㈭m6^Z|W=|+>XSrzAow߾ɛj7=)ik<{abBQqkO޹O@ߜnV(0Ïzt&;QiB@G wm^ܷiE$kqU$OUbe$Q2dzV޾{n8P,%t6i64}0'xʹIҟ # Fe&beQR)be';tvHGk̂JC:b#03 -AX`DKL&(Td)B0|no(i-DuKtN;yqq! 5=RƄc^8&D0[.3^cv|y >o==O] Zs k{Zp)L&Z׉$>QZ1O. .oPmMDCߋJ.yw>7ݐ r|5/o$^.fY2F:?yۛA*R_𓐨Ԕ!qE""ۛ RLݤTIgoSȶ{<ߎ>fDޑll1wV,Zj[K9rOqsWu}e3: /ɧهuo}t(X }\Ρv5gSpZH'S( FO#TG">ǼTwct7'[pVS6fQ~t k<<mV&rq?Y$n??{Ƒ d_rF<B_-Ȕd栗!%M 9XHkꮪ#_#L\ HV+x$/|% WNfu:L]?@&ܾ駔S&k;_]vH l彻 ʐeL!Pʠr^0]t?Gvm6SV5ZR;}6vzR/w,lX Z)sfD؀7{rA]~|ִm+SuaONgq)5!Jtcn>ް~͎y;M0oۜ$$&;!/ta7Hevߩɠk.S#FC"JoH=t ܇139"?}o{fwI$O:K\`'Lp u<@T󷴥)`_,o%%ac3ᯧ0*(Z[nTNI:+FV&ԏOW_oՈU- ^X޻J2"^\eZy3.|qzWHلHxeEWBɄr8+5 F+Md:D F ӟ,HpD֢Ph*K +%ٚzTUB!O8 `QX+ GU.6hݨXK֜u^6pH,:deqemhQgyYCXҧw3yP,0Ԫ<]#F2W$=t}杒v,ܯg_M hy,6Av3#h-m;L Ƒ/E@9A֦]19kȘGMj֛Z޾a.O1IY b3W: 5Z5&6>3wJ6;sņ5:{2hJcw˶ԞENՃ)"?V5em"kYpd Vl8!9o>Y\xQFm Hdh4ӼfBC4w׳"|{p&kUZ0ρVQR7нw1zn`8fr Φpi0c22׃hFw0Mw/-k Ǜ_p&7_^ +ϳ)l߰a: x8J?6L%.ScXXb /jczhKr37< OU15o X#->M2Q*j>. 
?TQѻ97kJf5S%KDϰcGf -r4jWIe89T>e7$gnW+D1ka-Uk wpَI[&->s"h"$G<@ɥ>j,.!nܛ .%nΤ[!i@"Z\^W3VD շg9=:3>P36!w%|g:55I?GOP쐝NGJ00OK2犒lyp?A t:> M2ngF|p(ψo%9Xn5Z D\Lv I@/x*؂b4|Y]0k3jf{ Uv8k`)IYe&/%H*,l Ut L"gqaSKX5n}{,^$t/#v\+߂φ5:$[9So/7Tyq哝<ǥ 1{ʵL|o |X?{/_vRfhfxMd >\}*Oq5w  3ZSqؗTɨ_"*P遆=d3BK`+@Vilv닽cK Q '̈ ʢb(V?w}:`%[׸]e$'g&&Hq<3x5ߌ:)PE?"#֔7 "A/<,HzѝOJn 3u8EbkGkS S$?NmFR3yYu/>V";֚-JV7~o, #>a%p(Gp"P MW.qc$X>kA%G="\=j0SD<~SrҶ48AE B[Gs0`PtsQn7^!M1fV[ͭh7(R^}#^>8="BN'l|FޝyW[NҩZ8ֈ8©#j'm^PtT>*̅>q$~,>@[[%X4lCDc=pc&ЈԳZQ`0 LM}|k=&h ZRٖ`Ɖ,Gu΅DK{s dXo'S!:؉Γa?t1]N1xhQ9bحHZԭHZr!޽qq_WޙW7z$u+Tn\[QP*rU[(>\mm *#+8``\X۱)r6Eʼ4YSIp':hP3RAW֑٘ut*zYќsaĢ`bbG/fÁG A10P+t?u¢\6go9uda93 ̜"8Xml0<EER !_Hk v gEk *S /@3(رB^Cps1 0<-Z ED9*=h=D$Cbčf6%A+!7874{Ǟ dћ\idG#S3DYc:h"n cJ2=S`+E!Hk=Vןr-3pcg5`H"38 IKmh9EL>KtM[U$:4vp3;)7h3fvfx~o&#{yz,WW? DlkٖMr2rt`MY#pEtY * \wNQZJ f/\e{aG9M`&#V˙exKϕVs\QI5k S\w2!6An ]XT!D}X/Qm 1AXY)tP :@FG+e2dYJY0|ƣ) AY9q8Q AK^r `ZK.8H A# %,G*ZE.lj\K d6(N *"-8NI.!uyAόv3g<%>8 BYsWGNH MX>K#u=ْrx܃^4xS 8q &uD|#%28KOq5b5ƛٕX&&(T cH1[Xitwmr $c1cHq"YLVs9pznWSopgeؙlwƿ9Lٞ^nHNWgn,l=ֶ;]U\&ׁjz- A[Tx**WsbBce{eZQxW7&rzOSn\&G-(9;GqҸvDe9'%5%g'`(L(kG Ӳ|n)6\8{vpA(̉nI $vz-$Ni1c9LBi쿴_]=d>}IY_h0߶DoX~[0m~weui]]wYu&[ǥ4HCƸ^(mjx^!Ѡ>FLS<_.'Fq4LAՅ.gfv702:0_ 6wU(-u4|_D25׷ZaԬ۳|CdOJY9(VI+8#%6lQ9#HS;xy&\׎\Sgh4 R CVj1DqIۀDT#1=48ZwSF7>A T[) bC 籖Q;R1}IiDrNCHRz4VN pW{7T O3zp _OԒnTAa(ƒztD,HRxl93Ļ$0XO:%U zSe:p „"0aJI;xh ϖNࠃ!&P9 2w02)gPtN sL2(GGaJthJ pLtǂ#&zmcI^v'P3өCIڜ+N/Isqx"QHo)Q JPeDh|_OOwOOD L_e (]F- m#}LSrcti$Ҁ2$M87&bc!\L3ljyS_肜d CɋE24ӑLJ)ґ|͹@Gg|)PZiJ`gS/](p许^|s(\/gm|,r~ 3IKO*LxIJyG_8kLw[o %\'_Ч1O;gn:M;7@t=YC7ݷG/S'dMM Fdkͱd?XEֻ+pt,yxSm`4v Ғ[x`xb"xK(fᡃv"WA8A;A&NYNG#ROO"VcJr|BT')ҕ{ds,HSzwrA w;7Vh j"[57U;X-f=h89jdPʨ= !cIH? cU1Y;))r= :er-JnUZփncM-4i0$Tw5d;r%gZJaI JU 2m2.1RdFl[.-%=kGq䲗ZSs1ŋeμ~Ml_d]Pp9Ds<0dҊL{e_-Km$4w'm鉚"n *vوcyqe㡍yYzQFJÃ(\Ddܔs]^Lr153;'/€঵B㞁OPoia{7)BYLŇ>a<ŲI@*= ,&Ofh%0btLɌNuQ̻ 5hA"G4j2tyC^^+9NZ~@ZO6ӆd Ie(N|01er{3DAq=zC4]*6zw0*d *dNsʥ2r_KԇY" 6 +8>j"tX-\:5kn Fh4vP-fjWu\.`7Wx&ZBpiQh?K knJCNBQZA3<:V0-5̎Ĺdm<>aOj.4i H6T}J2AK/rG7I./֕zj,޺"9Aߴv4ýYq8}[~L#" } eo`XsKAHAek]kHxոɩ^^LD2 %k9FϢafcM/^q,Hr#o4.-n͡ʎJJދ7muv%9`8ԝd߰~[]5&7.78Lͦr~< jU{) )J_(p6B%OR$s}Ҏ[ "6Ocz牬z%߮=09ȍVYsic3]FFTlz`3dK'f(6x"s LpVJdLD'j9A` q5܊81b&a"DHscnƝibnYG=1d$ R?%ffP"P`V = Wq4W:!hƕ4gK3(=WK ?TypVHqSɂ=I.BswMW}I;TO]| Om(/>Lâ &gWo ?1Yc2T0 L (vV"y[ lb:7P2;7P.nZ': -N`Yssy;g|~yR #O9[i[)g\ޟ7Ҹr\yLޤy'&o$ Zu//:Vlj+q2$J@8; G~S$ J&j??޹ɿyMZm:Tw^=V7,FXK i_ٿ ΪeLs1QwnB:7N']]~eS_?C^=Pr]VQ避O\E厖[J&ON rmZux&Wם^xMRG/9%y.s'_y=(]].>w|sOK鯏s~.jۓ 7SVݴ_޼N=zjZPj>B, D{]QY}3Y& o@O4J7p/3ѿq[sM[*ZMvBJF??uГ܃FO}{ѭ0 :0S%F\mxs뱪ā@"85/樇xy^Su͞-^6<ӈa|ll-ṅTzoIj2~6t#b^9 %-r=U(uqE|Βw]e4% Z6vLʹ@0].Ǥ#$h?$3,z)qf&i̛})B!R.Qp[šI4>[WxkZކYDUv=NԶZ1icR#-lR:D(9&dD? 
i&~)N=Lj#S6bӁ ?a@OLoc[&οe)B^ wIT-UhӚ/mp|cir=U`vXm꡹/a:\km8GD(;EG?WP(:.P> sDQF|d7Ǘ\l޷u\fWsSµvQF5 ,RIV 9#SdWXiYN8>[ >sn]@5ABֆtlZ'1j6i3UllnN=CV]87g.3}$Zx@fuO~# )zd#6k>B1"0{_<6K[ߏ`; !嘿iw&̶lV>ޖqrJؼѡڋ=(_PQG7 ?j"Xܷ=_RaUCs^+E 5C:2+lr9l* .P¾lrC| uS[h(;2k[kŗ(^շwgx2\|(^BDp[VA8흯&gDb{fQ2)>8%,E`& h!qq2Ƴ!` *Jp>iuq,*#Ϡ\FÅWR`h+.ҽ- [nAdvk✬UN׾?^| O~|ꗷP_]7 *y@Pu֯Pԃ Sҥ/&|=tAT) <ڔJ)^̫~rFU`IREDG*SO(̏fy˻Y(N 9^ d BA:DTȼF O62NfYH |t @56C( |6df9/2`O$s\#56caD rxN|҉R:UJ'W8GLkG3DEC A9x <*AŸDĸ{0ZkՏnTKmBO\ OfgzAݻu{o]([?r&K;|4\|nB}Y̕q{ A[V# ٍ}Vvl&CS!Qn5Ut3Κžna[/ОwU1:# D{~m%Ŝn_0Y"y5.WN}98</>,Fao;'q Ϛ;0|+88>MMeYjGPO`< }6iǏv?!p;8UDtnpqO|=o&c~@ݵͩstd+pOImCi+:7yṶFər3meG 4FTʪݷ#0qCsicK3L?Xj1α:4wz%8@ s (L%%ZIJr)mvg[SXP2zD(WbǼO@Dr(ԸBHv%!ԔON@({"z9W^9KlC*)}jJ&P 8бjQ=iK-y.^ |`w)f:HWS黋fiE\ 4@P3͞mNC,mHl.-a+ftinٕ&^NS)R%-),Z- mC4 76 /`ymbK͍wgSʂ 6))Ap}3$ 5aQy  @{F0(sBCsh"K|О;)/h)%JD ) ǧE3XL&#D^r*I}-=N(%)hg9 BZ=mt:Z-=BUS+sqou5IX5Vpw7W#jv-lHi?-9˧nqR ç_EQ؇<0#"~:7 a i#l|.𷴘vT۹ߙ,n<hCN#"Hq^28|q}PNeFP0b`' Euw|BֿMViB1:]U%(5oQ'O]Tf< Ki5HȸޮR-īxPeI@'&㯟"jty|& KWܜcveg/(} QC`͡ZKmp8c(􅕠FmH5,4jUj 1Ac6LyMe+Ej/:F59 ĩǟnɲcno1jg_W6 M Q>zxZY?)ひ-b=x;}O -|9*\z$!\D;ɺv^ Q5 GtQG:޵vk#3S!!\Dɔ[ ly&5>83>vkAB^ȔΔ)}IcN3!%BBjBK\$`H|6r*॥.%n*Wi֦Drh0 v%㔱8SDGB!0"ĠRP͢ktFq d1ݰ{mo*T1tCRAM:Cr̂ UʸWAWp03roʈRRɦ{8S{ҫ/]wη*w[yߪ)3_\[z1|6d:+NxݛF7>n6 -sr!AyGdq^c\1PfA+ЫHK{ 8ⓐb wPcc˦1yMc4fe]}ch6LI5]!J G,޻Rq1`ӎS(U!J)aZ+ŀ`fgKZ%|0KQ J$Z('B6<⿖PО䭫oTJ˩<T`sZ,'%1ԩ(Q`PpQk_QFs6X0RD ©E [ga7 \Mf b=e) ,ʠpVx@&F*[˕{s5ӗ0} bkv ^NRuA!y>?aL=k\l*.[ 9jK58nk5%ZF[#.B#i!6$IiMix\r%Оb+nTPT2~ł.uK mFkա iZ)NnT3_}}Fs)T&T'$mSTvJ)ĺ-EaXB |(|\B-A-~x=YyGI -JFk彗EQg r7N'='#졶ŠVfHUDXw;ao%OB je2^hc݇7cʹr`ƗG81pyPmFZA"EbhoRa^)쀕W۰(s\B 7oe))Zd^ɂp($š6s8d< |EJk?mж9wa|'όrF&mJ-uRhD_+qvd#75P۲@gCf3*"wZjAM\1#-D|0T#OZ tSOvQ@hLgӢU\?/KFbl[.qw6v"Nq]-"~6jrl}ܓCIcAmJ"@ T(3RRQ+[iW`DscYS臖xF ]2%ga'6څJ~myzx>jr6Һ1M%Jezh"~7s|zEJrHI$P}_/ Ha=h2DŽ0# #($).|>J__ޢlGm+W3TiwW3A éP> Wџg>]yYd?d1V eAЊ+8j@|w}fR͗*^1hcY>X1I%.?!VLV"V6Ndsy{5oº\cFr\BU%To<քma`o\@@R| 翻 ~ͩAVvy1rKGwW?\.AweWfp#hCG>B3`4.9E:GHsjFiBXI)Z PhGe}Uܣ >B}PVVa,ɤ=R/:Bz9[^e]/-BO|:SN0))sBCQ;"<}\yDa}*>] Tlr֕|Ӊvyo#֧ڦ`u=RyZE_),2<0+xпJ5x PXGq$%!+B, >xIB*>ъTt4/yU~wUnW9J@ Jj;UnlػƱឮ8_6!e Z{v8ӟO'3(}r,SPЖAZ-bo(VUA5hwk?? \49U՚TQ]](ㇴEQ)5$>uw[,/no:g'Eݷua|U̖כ|Ljl ~zj77ooj9*?}ƚB͋*n2^?_H쁲"-CIs'j~7e&0kt]m$.Y2Uݚnh[.mD']䩐|4m[ѓe cp>]zN&q*$=]|,ͅkDŽE_˫Krkmurݗ-_.8ҼiFOf{/cIڔm!=bdJ9*<51BI.BSkiSVMd7+8xKja4|s*u8a2HfJj6RIQV3`0roۻWD. -k|0\ZKZZ)B Iڒv *oi*QǍ\{(@0è ֩lyGF*t|0@ǘ3ELw= I" j^g2Ip"2o kJ 5ICW>3kئ>Cw9k!Rr9ݾ̦sH:O=/[ N8=Y*Q;βb}3&fshcWwmMATffy~[.r545=˜R p 63cs@?=ic|} ']8dbZjs׽?6.=ܣaEiL1ygus +N{j:&]Vz]0خ GNMykP8Lg-!1W2fҠ(|<Y4ۋo/*"ؓQ@ TTIs .ޕ{~.{*ùJ5WPnr//U9v1V+sd泻jTDdIcPdC ] [LEyUH5w˹"pRJ0}nJmèğ+HC؟o>~iۊ Ϙ>#8P) =~X "΁n)7]߫.ÿ|+зg' 3ګR̛&"#*>kc/xwKi^94VBJ{zEǪ}u j ԟ#-'+>8?h@J:~x^vClNJehXq.}S_m+Da @z2bzy+bd[=a}mAʛg 0Ȓ=M3&[BKRaLYle?&R*ٷ#}Q`6MWdVaY]P9sVw!"'3u$"IEDmQA;/"pHP+S/Vȅo2R Jُ}ޕX"Mçrj-ؙќOiwī"'x6J)t]Ҧ?jh-")'| \xc9uYuƏRZ$c0㢗P3cZRJi^޺K\]%Jj^?~(~٫iO$L2UKB᠟FK 2 JVyVPh (RBLQT `E|hijἃxxI5 \оO@u0 @R|2( UǺj*$Qbwi¢ 'pU\z9Z(D4,gX;eiIYԘf [TG^ ЅL\rرI pŌk`0^h7 )Qh`aV iYQ:F(J0VxSzƑ6x[Zj('oidghAߵd.5Hӥc.}%_JvG;\6>ܰafOu?<[zk-\k-g@v<1,nPfOM(' _ޅSz3}- Rm1ъ t=G[xP) *ֱ|鎝Gr>ٛm|m\9hwܢ. 
S3zSmT-cV7./))FSkm((+iU Lh=z{ :V5iOTK8QI!D+AG yXp2 %"@)( dB%->bu+f(M7>sDK j6#  @a/)/İf }j䤄+JD)ɇTޟNDx,22 @i3w9^~/liK}B/rQ#; jo!"V<=C9^:Kᄲnx+x7`G ] hѧT(B!UM"Nʀ#`h>]Z>}",9*Y h_uDVqtwvP,+yM M[FV̴dCwHN:Y *x/d G?Әv>D|X_G71dV-m#IF?Ek3v}CQZZcf$(Qn(d}YC\aQyQCB }P/r9$Q)b ^/HSZS;BeN巋ŽY\(jf9'@@)k׹\I W{GNE ѫx>w!O:1?j[LD% 1qg봃:[PٺF]iwmr+zTKM7C*Lk?ov JsoϴeZb20G)2 (nzjwqBÛrH`0aK2˝a`_a`ࠃ!&p@ÙǹZIXTp{Xy\r0't0qᰳ`,>$D%'TT>L& [ Dq.y}Q L;L\"< C"5[ƹ\O{|ׂ@\՚`7:SDȃ%׌c'tBsATθea1u kH[JPZ{P- ״B쿐rs)'.򝜸z 5Hvq(1:p[~ Я^]#j;#s.xƘ!HnH\խFƶR*.[em쀤\vc[[[ͩRkIU._a~ni-'1pLU$n\:808 1h$X^=8/Q8~kSn+:lʝK)whxSJcekL髇WԊłMZc2׳O84j__dgǥGM~)qI)u)8L75X>Y&1%V$/Z]Va//fn3,<Îx9xswwpa=O.^lEk>cIyЋ8 q:_$PQJRcc5eo|<]JUian5ۜӟgE(7AՓ]EҎVWD[):~o)acnm1HY:mQG t i)쳻k%PU@6S3K ${Bf٠]J!)!v-^("EӶ$dZ!w@øP~ `e`H^ĄCj%MxPul`Ca;?xzR7*q:LKy݌^/|#Fhy'r@8p6^z#`Ι(J^;Kj5ɑA\z)A:'gQV2V~<8&wY\ H?nUBk}lE& O:aL'3ZnVMcPU8}$S)re۫TV.[vFsc$XvkL@; x %p=bf)urgUvnZRwW ps{# cq4X2?ҕ$+1j5_ͲD1Qm4iFX@1ƇUF^@V_rӔUN}U9SK_O1!{gz Ww:]:]5X(Wx:]_uWdtUGkSvPkMOtbTaj2GsKb!253론Ukz4DrJȵ0?{ńala( J!{q4ٟuYـ$XKNr"WAImiPbPK;k1ٟو)jy}u0bhѧnLDHr/tH7's+2-1evf#)L=y>xƣb2FIH|7Z$4 d1x6։}N'%Ҫ?څI8WۅcDtPW+ ܝ_#?#n^(Q6>HDbHW˳W&Mpve諜9R mp 8=UDVp5J_ׁ_ҁ:9OyEMG؃f*|փayEl]~])$]E{wW0:zsL(sUoeȲgLj1?2&w;/b.[% 2%[q)Miҷϭ͍&d H#2ۨ&&B s#BB8W*lB) }`52Hس#ZJ-zW{G`3Hj'0yw QķJG$vtDulU[65%r<_J:H)OӸCֹn2lJkYNe/;gGw-̦,yv,k!KyǞKKHY ['[ȶ6h2w5O_@s_yW+2F~Vg osWm)pz\|mN*<6F]Fv rRq䶒_穬u}rͩVSx Rs#`c"vpls%ΑP;,ĊYV fwSpӧC>6e!aiN*4c\QIH1unmn1L%lTZEH[sD ssI"/]PJwʢ$AR`Aq H#9D![PqT00aYLr-%U|}%I=Z?45yb _2 !=MN_Km6_OEɀ-Ί'(-smǐ{V-. Q0wOn^ 3.A#&f<= 㨼bJÉ\KIVfV 5۰poҹ~8Oh<ҹQL2&,sF8,`Z ;4!N,1i$}JDŽ]˱ #֏8~(~ SXܡȹ8B&U)z4Ÿ: i %觸 tNVruEDX2ɭCœ iU'/" ס21qւ#nQY ]Z-؉|~7C`Qr;K&4qGg>y=7 fAQܑ>BW[oJ_LrryXûpb@hɡtgopLomxfօ_Rx׉ sɍ)H|;Jȣ_7ӫ[-eȳxgqO< d)6a4Hc,䏏sɲbwG~C QZ?_VMc@LXJz{z(/9AxPǢ3J//x Gf}_Lvjqn7v&%Kz;~H8F$ÅCԠI%YjـkLј< =9CC'oPEaDflq \j?sQkM᷋I^^<,ٻV`޿/0:y>~7]lh89^g&BMnЁS-#IÄ >WK2˝a`XM t0܂jtJiM1Ga/J !2)ɬp٠X[.-^)~QҽI xƩ8{ObD3( ]l0Ac E%p jM c([Yګ9KЮ4v.ڡoCkyDq䝦ܩIckra,dE[Yu+#qvz9A| >e.^l7&3 wt8OwV`d3shMٛi$EFٚ!/9?>,s#ۧ/Wϓ*,J\h|K՚bj~]馕tkAi:툾-!IMn]|6^SpA^\8=vb7e>)w BWjsݻ`ն̓zoh߿O&]QCeϭ`\ͧ (`knzH "(ҿ2;Pll{NǖEHJGtzZOhXB'^7IiWeZ+J+'$W5\}y 1 1ԺK2^Ra]M\ܸhpm<}_ z,\_: <=WJ>[MwezIrqI`< {$M:;=2VUuUuw-D RLW9 "nj{P&۾?itбS|<3SNІ?\ ]x:0" vu՗Sf@2;Tq׷B%JN%X=cN8 IZ K, vEJUFf`mHQ>-5Zsf& x&md;GڭS_LWJNoe[.lKWoe[ 8'll C#ZK|l ~6^5P\2RNqHO pab 冔E'FŰ. d9(QU1 62#Ը&e Bu-$ Jes;O{ ZCZ689^]䊴)MfE%u:b* &1`!̉u 1eVg ayW Z/8"=MUoK4ٷƂ[8aHhA#EBjogJU6M8 X.#4brT(x9"h P¹ZRS{\?:yń:й n)L @}N ɻDFI0H3(,D$88unĸ+ f "#_FEfF#8#k)BasAZr 1AImiiŐt F c lMSRrdC$rhGcs#͢JX#  ZXI+D:˵TW)r"MXƓ,{b~x]]k jZP &.ںTˁޫ35w&JŏU? [MꞛG^/9en>W,V]kͥ\~n RP}(J,1z^ /DQX9kPMݸ;伛vĸ\6US-&Q$QZ<ݭv7/Ƈn:<;,vanhl/y1P}~e1eZcS,}-hAxhݞb!caKI6#.gS[!zƭM9> i=f׼+_*T~>/Yȕ**Bhw5n,ƍ}D0տ{:檏HS{"'[dc: d:D "`Fo!a*;bVJdVs)F"0%`d!#y58WyƸ T 8Q|{RP"DޒSVs"zPngWdNncd),q;Ւ##02s.~N N{d!)[:o3x;10u_ns [Zqbw6Kr7C~y;D58Xȇo6ׯr7,WO |9"Wq<_,|4w*fdLb'_S?xL|~{͔_%" =p #vT+$lN?~ F]5fҲ/ GqO5+!|Mh)DFlq 7=7Ie-7dmG@d4R`kn a'fSXj;X0)W0\LZ'ITyw<)OϓzrU $>P,z~o,fgXΈ?d¸7?/T. 
̨?3 =0r.ۦjy,zaW6aUdJu:$+$ +Pd HͿ9B9;‘-C!Ū+^{,hp,Y+ $EOĭ0udƼ?YLAyЂX Z>y鈤S!N[R~Ov S:^Ex)a0)xyzG ԡTr2Zթ$k7U̐ھ{톖թъ}{KV^ ehn|1gxUk-RөQ,Ǩ֛04%/ ʉf.kLvZB_v~'p?\ C|?ey/k g*R% EP:{pBiBk7[ҌVL$-hM_K ƘQ|Cߚϒժ%xӨW˭WEFJWztu;T_D^YmC@M8~3BsЎ> .ibMf0D4 z˒pCe2fv!4._Ha}r8L* O`&/> iR`J&"bGT^h}uЈJŐ9߆~L?u_3;e徻n[mc.kv Hg/ב3GpCB SzhaQtRy3G;qMwa4YJt>́a?ys}hi9B#,č_Dϫc&Ae>~>K|6e l/pU* ~y!Fo3#/R4RF`%HGqV2)6JzʼtDG,3&&ūTYZH+'ާ-ik b3F4B15Tz$B-GQTÄ6;^<.sN~$L*|׿hle͗劵J7؅DsnhiB~#XP =7yy0wOLl8bѥɄ0x7~4ea;q1W>M9]{Zd׀ {z|ieiq=Njs?1Is'GˇGZ̦ (t?L"Lf\a΅:J,sNTq2X_Ysh̿dKŚmwW _q3x/ɢپt<>;(G{s'qhR>4a:'Yq6us"_vbRY *ss[ڱyM5HzN/%plW,IF~Ep_3Ɖjxr^|7;{}`)ۺ9O tagΪz?_i@O-&>8a !LI3&t4m^3Phm1`J ^. bNk@U@MBh5J) |䚃W:;|Zc_|'In}3U}<| 2TZ*(\[OZ#!EmIG]D#kEA74F%UjE@^*$1HH5 YiEu-R-2B+s)Vxy&U\9D`C 2kV“ã2skk. -֘)j!(1o(x, KMC4 3oaP`1hтh"\ɤ)ϫ_ˀ?yqW}``F p9n+SrP*HYe/s%W.m%z4RMHf8prH)Wdk8Dt7M6 1ү/`I0>F:hvΪ8'왕)2@,ض)4A{@pg~0sY BQ/-mƮ4H-VP)FUSu-z

R]Cdk[F$Uo뷄*Ъ[Hݟj =NT :j'jx`&ZQl2ig(W}ͨ$L > [N^To8cB;CU$Z Z8b  qy}eu'VRsQo;8sgf,{+n)`r޹8Ű_xOt-c{g[ޅ~Dk94z!tM J Jmsr@X=ssV~ORz_d482] Y"u뗻Kf>$S>Ƞ~4k6iMSɸGV./d.)2|vúQ ["Q8F+v;4۹uKSn$䅋hLEڇaགྷnĈNm7WfҏADZE4Iu((|@떊A褎ƺp3xں\n9$䅋hLq9(N9)GUr#9LTˠO|M&M_NMP(8C=)Nℼ@_r8!&p'(X`8arjF/NЄ)Nℬ@_Y'8A Zb= SU$eft8!&h1&S0 5#'p()NȪ "λ{e<ޯ>6W m,n< ˺?%jH۬󲕁 f!U3`}sJF߷b5Z;l7]U#fONQ4]zk)"f+uLǤZap2TMVg0$|4%Xg(,N8VY9u'=3MWw묓xϗ=C޾駋>q} &ΊIlȆ7@胳I n.Ș\j`Wpaz:;SApj߹l0I IQj'viS!Ne!1=l/bO9k<gљAX-aiBsK&Xy}/ݾ3DO#FG??X k_AHSt-X^.:/;z*}1*1 zwCeߛR2k79>~O:\_W/S-]4RYPIi LtUR֖?׀"ykUͻQ2B?EP3{g``'zV)reQ\2.=(BmH hh4Xę =dEZۊQp7 & LwU#.'#t'| j!;߀nX8/DwV?cj[a~Ui\Ȳʍd$uf_zWlm{d: &O#'OTHlE]߽Wg_y5Y>e}+q}iI}dU6xGsZ!xYZaD BtW?3вLNV:T U:]9x]EVc%J]Y($-J4!{5<,~rd"5hϔ:ABSŠR9L1iYw.vXpD8&H6.5F+j\y 瀖uF aV!KDK#F.G]\ޙ3\d3~׆췡LбH*mY,pGto2>tBRUk*-ʒV[ƸD14Fe$uo."eJ\fd HYq/X(_R(|BUKńRNIYyV*p$ZJX_-V8K-kVOΞ>Z2O s& [OK-ʸ~*ٳ_Nb^o(JY":'5eDUQ lA6 ֈBE-!5ʈ5 ٸ`qF,X A 9nnV$J3H03Wb.;#i|¹,ˮ rk \DS!Ή?e]\ U*`cS̷(wč)$s&r$4C*$ qK *Jԥ6c\?BkY/ojp.mHC|o!Lp1gwGrAԒ+G?@`_9""w f?hV^W ;B; 8ߞ f:DTУٙ{zqzc% I> p\azbMi{kaDghGަNH:>E2;r+ݖ (XG|E\Nb&(JA]E]U(c Zbr3],AXےb1`^4>|Rw+.]ȩ1s*sf `MbE_N gmbbmcŨ<{~f.@08}Is$ʟh,/:kNk򝧊wN%a&(Y A SlNJ] L˞Ó/ \95&zK".n+ [k='{r#ByNErBu}cv5F mq+Ϩn6T*aB#. JD؀Tl h#2 ݥPԘ1$gb2_0H{dTRZlL^Ō<2k֨QD)W%AgcrmcrGyg;4&sp>LR u slBI"lBt(' #MV&Na:?-oQ!ő)ˤEo__B1‡s=4z!8iiׂu͉i}Ϣ5szwWwx6MVD`EP2-UŖ1jG1fYl!78KaRH!9JI.IĚ8%)lziL1i4$جM<Ȏ].:$(#IvgB&1]é]LECǚkժ ‚=J-\v:{h HnI _3HVrnu\ON;-*:&rSo*{PIe)l1ZZZ(]QIYQw8TɲJ1)DKkhUJi<nؐc ;gFʑ J\5Yim="eÛ0RsʅHRO1%C(Ղf]tv7 fT6 hQK,bgş;v|'W-8JjBFy/ Mڕ /eIgq)R"F$U<yn.̧$}ٷl>A ]GKWwFEIU~|svj?DW`?)>ַ>ַ>ַ>VoקEtiPo1"i8J[Ra 2<)+iY$fakٻ˅ּNâ!6!_czSOc˪TFX[px zPUX;lA7\}viKSK7侴$"ZRǼHxePdg ^DCHZCZk5zk n #=S^XU 1%r++8,\ee*Na-ae9Z~~-;cxq>آqKZ!:z7(9G] ?xs}rE" | t9+oPΑ帇:b gn?^,?׳zanrIX,̡[tl&9>ݭt̝w -0kߝsa4p8!`m ԷMpCNwNZGϳېO?wUlC<};WAnCh}ĎqB۷Mc5@]}lS2 pN,W}$/쿒!/[pz愲\*5^u%0*,zK+2c^bkƝ[0aP1 C[0HϊQBS_s+>ӹxwLFLg 3Beߥճp4$9Guttkf3(p lٚlҼ=t׾_ Rflߎ2hio}!vZ#ROu 0'VC{SH= ${u {X>vc^wa:c!K'N']EQ8},*q˜BγM0٦ӽ3MX"Eh.6?G nD܈-Qvy5[)8=w^3HT\|)5[rib_>̆ íُٻ7n4W ܼ|CxL`U2}$;߇[%$EE-!nT9|$?[K$Jkue&,6mE\FAQHb6 SߌHv8$'.rbC8gg{Dtw:in(ĎCNZQ%3ŗ\}Nur*0 vK]J9J3Y^G^%1ܰW ^hvRp4]3LopT/wSW}Jr(v3<@@9 YO E[ (S ( 8"ϭnؾ gn^[WS;߄! K|.,~~;} tm{ 3hݗnXSw)[m.n1#XwH/>98gWLw9nQ㬺0.JILh1jt80$uq^#' oxQ3ZG<|(wc95bKt6;ڼrKխZL{.eӺsﭧ[{'nE0d&-'{EGJŨZw3sVyl ZS2 vA핡o kn'y% zv d0ߑN;hw-8*~\g(H@Ȍ ~ߟ˪//tczo~/NIBtmi(اZw`;TfYgΜjfV1alV _~[|>D/OP].3ILq50Sm@Iz 8 I|?6] L.fz>nNsx%%R2PU("<+EA/x0(SB tt03&]5_vt ZXXnG@UiHd(η%SB tWJOx>N"G!"vߧ 0{7STdt\%Y XڐhZHh(^qEP۰N۠#72pM:f]jd[u%ac!8eQbEpem$T-/4Y2.de-\L:Tc2yXꗬʄRxg]aͿʹ>Me}Za[Oj_f!/"뇅?N8=/)BHS aϪ?wo]_̓xp4XI̸?@=Xkw8ge<}9 a|2'#iqA] {ГYϩbӛ`)s$zڔ넑Dm"LEP@(cu ):] P~hY82n* jUeݗqFJmԈu U_/2,*#-sYV@l)RHJ2ҖBJxJS /Zhe&/=``x`V9)i\ $նWn(Ud2c bPN)'0oaKb̋m*LU4JbŲXS@-̱e1g m8=h7GMflJ Lwв! aP;mft#8Ig>?*)qI#ǔpN93HGl̻O]x'Z;c2)"wDujqvW7y#]eA -JxB*${<#F 9Z1cL ٣; ؏mby%7q vc5BR=wvMii8/>}wUofo|$rHGql-+`9ʣ{Rӫ47:!9Tҳ=WN*9 5*1w"sbNKA0/׮1"ؗ7RF1$P.xtg!~1S`V~՘zP߻ޘ.Bˆcpwt{o}G1W!)sVnSIKhseSa2U.3BnW.9|MZvYuIҝ; *twm)&Tx.ˡ]8Rτ@#ۧy ƿ'z|7ۆ]#8\ӽ\cQT'U^j`gi$y >nm?ִ[S @/ܾ?Qyf{^]S &׽.G:8dKmHKl{Y2y=PP.gfD*3fc<4n Z DaݤRHYr%aC4)*d8bFLE+FMgoQ 𚬠v9sJAV}W rNT[įS&юBY{몬.>\=go7b=/YŋdqS+GCxl!Q,~uRκ3(@emy࡫uu/ Q~oӌ6W5]! 
.mѱx\p\)84\8 ;v!1#q>Ҋц{ZeP3!S Rx?orT1WL/?M~KF?odVݾn /_bYV_ѭFnt[F)1FP]aiRk5Fdi0!]IS#J&?7>vuqo&ϩ`'7ݿ<.?i3w˧u}[ ĵ*DZ^b+#UEHU"`8UYbҕQ *BnTТֵIJWK0eTk5UDSqpeީ$ 1 (:.XHZszPjnJ(ZBc\a9PDWtuDz' F9Ot펒[g} S8 K;.J-eHY͐y*V`IXK7R*40"]-DLyR3S:9*Q,x5"ټQڪ-}(l1;p;Fh!3: lwG4TAH7}1޽Qb 7>z)|;83\(>%rVd{|%hqAV;DNdzf{Cİ%U#c+s?zF#bd@GO'eH\}l~wJ\%Q:s$&u9.1 q[nHV%*Jj ʂ*UU+%5ĽZiKJpV$Ԋ cjWYkm]ySȈ[iqQʎO2EE闍NZ:ˀJzȔ#FIo()پt~\xMjS Fn@`[*!M'1p&Y\-G &ھM1$^:`^9ӇRi8pH[z~s7+n_}BnߝRy2c\j e{!ۤD:>F3>S(O`ɘ )&"nN:ٽ{M2xGwe9(&d$>#9dP ui6gƞ o*ھ $?zgfkߒzVnCΝ ֥ 9S!oԍ!gB~+$Ԭ3x@F:m>8C}>d4'QQꟊsѐsȊQ'W }IĬ/R7ؿEYq҃kqؿEՙÉWS_f:ChQ.eZ7і@Д1^LOjBp, np!r)e R[+ sℰ:7qT9xV(Al->+J쏒ֻAy QL0pb <+ g'\߫s -Ppw`)Y߳KD8yAb1[xE-;<ꊌ1`wf#iG(al6t0:6ײ]D;:L\s/ w cGkvi679AV7G!1A.mr}ٛ]lJNYO3BN-kA %ǔ^v`` i Ī+i,vmŠK*;ock$B(T^Y—jjv|F=1Q:7쀮\5dE4nꊁD oMQPK(t*9Dڟwɑ$lDff/ƭ0nq'Q73CD1Qq-38)[VY7TUYU}YP$-63K}QYwڇ l=(vW Ea+TDjA.ݨ-p@.Pa{F!_{֐+@>}=`2{iat.eO2k=))-8)ed (M.3dF (gHmBbq]bbF+%cBU#;pVSHzl!KWSSdmjVi+ٳ_S×t.+ۀPmVOOѵ槧x|ۏC@<O鱏ޔ-Fz劖<`B.L}Ɯoh⦅W wowd\k25 ZtܟL_߽ fmf$ Ʈ `3WeTu9C``N.Ig Msˏ6s7 oWyK.~^OWy OP9-/*anͻCqa}"0ͯUvb%Vnj2] } XoᆭY(BIȒ dfON$2Oh41F5OӜ 1[-\[ d SS> OM:2tf|6[ 4krˊSir 2ŽflŹ34#[7@Y1pF*:)bIQM 0k~%j8U҂ov\ ܊A1TҤfOYՒGYMB7:>r$Ehn1AIڒ@ɣNBFUIM$O HYT`4Э}iKtXIp٥Znin@@'jCydq(=,-bE _f(o0ͼ1bQ7MԼIuon]_|(k5?qe+;bh9/꩙AmqŲg4l-Yļ['zpK.o7šn]l[\M.~BcV@ |,{M836zrxm00y F¶b xtpV04 u'zޭ9֓-ׄc7-`}"bqM=}=` ;);o>Wpt{5:?7]PFՐvmNvPA#@;\k)RL0N((cުH!Gːg2c R6/ vIWEUL#tɳzYJ?5;~nmn++8eB@>-#;15W7SM#QBOP+=NJud2Y?w0Bq[GqN&+`x~7iTH)PxUtnyA'ش{A w b!O{0]],.L*"IRʩ/ YP߆ 2 ^cRY#Z1V/dJ5U9+8fѾ~Բ~7B9ufː{.c9W¦]UE:ɿ\wJC'L6A-0!&bFVǣ[ԍ#G7 p87.o\_E_3.pJo/ QpWd pCji"{}h(+? CCz}JG3e’魻˴k[ 78ڛx_3teįk'_I ]~+C Lπi}i;i( f, _r9ȯ ZꮻgٵӚuyv9ԠG j'UB؇;T[Sm5j(k*w`ZD8V",.r(/g̝d1,2Qrdݠv* #xA42iR!S2o9QV-'?\lmhD4H X"j,&hrE9}֋j~uI"K=:>eӆ DF@UE~Z2 (WL[;#[䂝j&e)/!hD|LU!y;MdP,rd!PH{R; ^)<i#|ae.RsYG!-K'BKJCE_H.B9WdRdb4ZN"/%4XUpDB!r<ӂTJJP73oZ~Rv1  sAf./ ˜GZ \^xqdTS8ZLR04\ɕS ʔ[RZ[yNGRvh)Gh݄F{:x6 e[Onzrq8 VkGCgzTj#!|k<=BXl0 3 \JުAIiƣ5F6dQmo.; ׶!kv ՟y3HUi*V`fhĈ ҘJǷ7CZo9wc }g޺#z_Ӡ}g"5Ee5  /^3V-$B8J6Ft [H8uYH[k8I!S!&,D% *>$Ӻ-ބ[nj|5+`G(}+ +t+&:/;>97pd#BZ .L Kd@h,syeRT:U4ܯ䔼(d{w͗ުm?־٦_۵y9[:._| >e̖ +jZIV.>nPYTΗ%v1rmqJ6W `ӂhdQ!nMh*CwMSqrs^YbԢs@4G, # %8q0iPBoY_ưQ2mICj~|G`8@Hf_NOBV1@E|ΛnNTz ;B5x|WK}פb{N>+]BGA+T#x$Ж?Q.]yvQrV2dYYI*J".FTj΃/Hm4gUvVYm'?Xe91YmQۺ %ىV 717ZqigJ2irn].-Vi,N2F42wKjr;l!fއZ 1_AFqgx2d;ݥs$W[RN`-n !r1%ykF>߿*UROz ^y4 U%.q'fztbC~}&1\w$6־ vHnGu2/1oZBf$(jˎ `Lȣxe>гkv>CUC76j-(J䮦t#BV:κ!6U{;~k LuATP+gv# eb?yz[#SC<5}$v@hp ӹ>z4ΗM0K16 wH.]/V`r^}鲬(;n\iPa2f/Y=%r[Pk@PXp2qsE\~r{`PߏE>ՉͭHR4.I4HW1늖nb]]Gar+1{(뭼ѡm *+̕D L MR(&2TpHj?k83nxW=V6G8DB"qw"\Qcʸ6$:b P_ .|)rr,OR/DYd\Kt'5s<5x+ވe3!Cpq )%1ػHG]KāU % Z| p>ш8е[/0P5E$]ϫ:8jSAܩNUDŽ],F[rؑ?^V: H(ng7 }Mz&?eF?}בI{ HsDKBrؼu`7B3%_(%gMq cC^AE1)J/z28Tj$GTa^BVZ5:8/XJ k=TU=& Kg\J{T/KtOϢ-R[~N\/t#T}9o0(."?<<njߤ_~pjUbs>A&%{aR9#+v@J|/%I[./k&M5s<,p5}˖+mBY#qX359xrW,yYa6}ag23-õ׳{G8 =\ۈ\o6s>5ȓv9#rff7&k=co: NrA.oq"ZNҢ zD&t3eP[R(BɤȒ[Ѧ ( Հκ7hĤkwlDUO*c~ewo^t~ ֒17-YX \L^]/m]a'{&0ix*wO&͝' &[\V9UEhsQn.IҕO(!)s\{eQ+\[h|fAVOHӂo|'1W ÄWd%LNvifdHm@VA1qHis2Jq5<*ZO~0s%weI|5f(}Swawc<*nJVz3UY$,[atɪ/"Ȍ0ޘt@aB,t0S"$@]g:5[͝έ0M ɴZؕx|y%̡i-0Aϙ+z`WBϕ;Q.;ʵwt˛i2vߪR(Gs[֠B!0'm)xP'M'ylܧc#7CqgTcn (TBQ:H&DI993$xw_Ʋ:m,s[.S޹B)a9)uH Vnj#4wK kMW/jsG 8#TH2\ߐtʄZÖ@J'Scv k,R:/,(o@}f7^Xj?F4G]a.3X̾Č}xq7霞@>o,'||^LKZ_+ZInewKx=:R<KxJnRCM 5) l(\MoκPVAkg8S>g`gSςQel8ͤ+b;Ċ3!XsXID  "L7Bů@X8Hw| ʿ˿ŗ*ڀpc__ ,̰ēbcC8V<@xHһN 4SQol E|_|^y1^[OӘssTK,9acHJ0XB:"j݃7 RsR?g[-jyl'o'B(p2M#"@c? 
Yp!rϱLq@-1 A^#m9h&,,/ek@ !cS20p$ԎN0D:$i_AFڐP`-]E.AQIʐd LJ0N!/L0]}(?~hS݈2Au4eQnnl=Qʱ3 Lq+!r댢Vbb5b+P M.QdID %8!@5¨P<Y(gY0_p=~;}MZc]S>GsVrٷ* TA9!m˃zf(~ voKtt3K @nYmq|[0Z/Spl ~ϕ6hRи.xןadϗI]V)eD7-'Q%{3 lO{Tj>h%IgkZ]w_Xx9XR}krMqTbM_$9bhDTS1\ofH CS*2)TgL K v4ܔ7bQ?ҤO8 6wq&|yFͲ8~6XG)&.lvUzJ`/jq`X`2T7w/iaXםuD9r|z]߀|" Rۍ~S&@Xꓷ*WciZV R&s?"-9s9_T雈 |/^Ě y嗛nfV[I Vޝ|~SuITE R$'sw"/Z+ncВknH Z#Tחt۪uص"1UWfSygw/7.߀\I%ڀXA90rB$3Q#*Q>m/z}ڄ+F& ߯i.uoH/1yw? #njPbLEeĶ=j{ *]BӁـU1j1cҦKmǔU 2ڈBr6P |~pR 8$0F|L`|m##NBgAHkQWMxeq5tn)g'&||ְTl#LU17uJO(R4R PFW;De|vWRL%ZJaS?6=|ua6 Op|x8Acw!އ[$$\I͘#%"^}Xa&!zDftއ4$S=ڮ-n@(\7qvB ܫk28eIJ && q:D#sfq|B墂4yѿ-6qwiѓ# !z]Nm҉U(4(qq Q $w:$5t,Tif"(cfg¡LS95U&)*u d hXl.iR2K\OR>ׅOon1to/E'y |HY.Z@Kt5VRXvǿL : IJ|4.uuyR۱[guF&/bU8N gY[ X Q}PXpƫb=tܻt_ӆ7–ׅ+V s2R69%ӆҳw CG *Uﳰ.g•o5B W_a/5' &F^1=G$!3IEԼc C246aa+8[7&]G;ƫJ@92czO2>^9$'W)_iݫI]%7(X\b:U+*-R% sPحC%.6Si vC{Kuɒu{Fn3W:8^;El.pLJj_5]k4ਦ`؃}#8 Ji4Aٻm%WT~9=U~He93KR.6\65}lSiIQhuh_+6$mUTj޵X%HDRQ4$nE((N^+G F 8U qNsņLiS4h@ $0Vƪf$KNμ-_w6)RBqd7`|E,h Q#$aeZ;tLa$;A)bI-Yr:YѤ i ]: $\t %^/tus@?pw]D &![N:LI% %isZޭ6) ~[ JfLf9]l25Yɋ=C΂Ҁˁ]cRnp39P(@$7CKB"|!XR3#kA!DIk%1̙[lc- fA@ɜ祈C1D)E1w?MLu g3u9dܯ޽EH~L`z!*X}x3Sse {57sءoV(j[,ő*E}}^EaU+kfz^/QE[PE`̿yiPQMUUsdBzDZ A-^~_Y*Ϊ4DXٟe,) qIi(w l{aORGD}oX=BguD9+ѫzӎ|NӳpܓAy\QqG̯Ëv"B8n]=P~}oA[kӫofw,ޅ_<9+홰:*^.riI1D*/ԶnYex&.Nf6~s?Ώ.Fv8hg{t!,Zo-l z6^U=h31yEG /5i6ڧEj7'_.풣 ֳ08po`,rQ'% M4ɦ ZC=t^F avKŠ餶dvEf֝ڰDl@hvK]j0걎bɴ`w֞d)w O!O3Uw_tٺ0LۼkGmdA#*p:ljns9"my_m.,R,G6:BjM|Q?6=xۆ|&dSrV-:Fm*TwZsvkB>sM))Z:\[  F HU5&8U,;s"H/e&h[dF}A'*(sӱaY> 8EqE|ki=` Աl>r:Ib̤tҋ,3jdݸul+hZ+-FDC:(>(-`Ž檷>Q.Cbed2搨;cFRK[k ]}:Q`jZ'd`3G`cU :+j^{FA-LSqU}Y@>.פ#?R#!VpyTd+?PN=~DTbّ+bJx0"P0VZo u\uU4`k 8H4A(Œh F`έy\b% P6k\a:xnjqbTH8F_ ]aq/*{Y&֖+ތnf431<gZ2LƄ5f̀, % ,%{}w1h QʱFq ]0KL2j3i&}g sZK(;Yt}޷ vkUo1 $KAfIV17q2xz>GfMSi>Njv->UlIBa-wQ[㚝DZ,u\f|ɥWgZY`U 9;`Ja2e26fɃLLvVFmqYeD ^g&t")KcXd5e'vqOL_oneMsȖ#GP='昞Rc4'w_D>NL0QXJ& y`2]i4}ײ2 kʸ57_ A뉹4,_JG"8q^McɮxAn=KE~vTYTq -3ޛxb Kp`3F*C&27"P0T{ Kְ뗄L)#ORfw8E"`dN,,Nf3ݙ{yLc2Lpu:}xf=Xq==βfϢ[eȡojqT Tk=9b (ԟ+>o`M= )1 kzR0B!ӻơfֿį]+xį•~ǿ@N/Of.҈@T{hۅպDK_Ntf2}}/Ghn5(F_Ax+/2<4XiB|\ |8&8PP;5NUeݛAI7+ npNS\0CV,4KsX I@" v&~jjAj8@ȨHP,UJJ(RqzD,˰f*ųio,(mI@.Ԍ62U)R-ž}23}.Ɯa=Zm4bXX;g^M^2`!nS @竒QTRtm0T"&Uu(m%MEaܭ>zd! ƺiשDʁCHj _vV5D͊Iĥ"O%晍y6oRQG{Vg9!j1ff6L+16,p^ҜǥTLSe_5sk(]k歴чlAJ{ҠYk? 1]L! 'B_ M%bp."&Bҫ)8 Cl;сgRXLwLj"K "8N$lcQ6d)Tx܎M'VJil8m0>`5tfQf3b ")4Fq5XDo0Üǟb)q%ҎBcK3%Ҋ`Mq5@)/%!PƥhY(9do`ƙ T ]A;x} µ5XjsPc}W\^Saws2f2SLa-q[ G)K6#B&m!V5@M0JU'ѣC')j[Ee):Pcm5\HoFL`mtxBx(' ƒS$'y, mFpV ^eպk-1OI@I[f]ػK-hW3̈́rّ$12"3/(q :1sLÙB >MpR,S!a?h [e.{#XNڊԂh$PVAwl3O5", `mY+3$2R0o `sI}J#lL {4k/M"5r[)r]n% -d5t ] t8dQ-ϯO7! -(S´Fj9)f_})QR%ISL`#&Q~I1o/'v]-ve0C_2EQ`ލJ$wu/&.rא-!.攳Kq2$m:x\spWw= rFW 膈r@5!.  *+aMfp]rȀjyU.C9\вZW孾Ld  }xwVHn#*I::bBen[.zM֣h+VZFSwD#W!ǤW殩a8x'(3֚Rqg b/-wl|Vl@_G mBåw9"C-r8 OgRGe$t Ab t׻yy7T5oyIjf|7w^`s|#:%¬tOtuGx}(S^:q®bYNduAYE(gd%:/ŲdIwQ(zbjsk. 
X)eK3w3!]/1;ѱ>}Mˏ?}nϫ?:8 〸.lr~:HMQƸQ,7^Lw+' )V؟Hn%l3"Sщ m`2l5H:qG/w{{r)9nb!T~۝I~EVے^,=$"EXDŽ@e uCRFڬTm +nZchwuhZ(Ic9-Dvco  OY ]aU`#rtCq6|G:D$">6]J͌2E{nq>3dB )>shsrXGǢNg8n1?SA%*TV.&7bz۞ HnsJspP5??aSὪ N5wSS.\zݪF 13"Z NE4]m=b4{>A=7|ȉ;b2AP"1nq蔫D*cjxW~Daٖأ[fYQ/1JVcΩIMòG 5}W BjMȳ?- =}H:41#Rw^8 Jq[t 8~w T;\h)9eii@T )R 9A((%?'hcK-][.R9'{ЖLp#"eREdž| I@{3S,,]tNPێZ`e_jΔ Wun̈ Zw3&z t*Dʹ MSMU1X6X v Iadm@5&ş !Wşg*O^Lρ1 9S?ILph9Zs0Ų.ŴPwLEB) y2:-]sܟ,u|*L e 5i8; V-N +Q$O !)%c^[e my֕ـϿƳ2)gݺ;˥܎ٚ㱵&R6w-FTw;}ᆚ>*Ad[RRٳ+II2);ÿp-wWe Ds8jL 5W||`Du9åVsHrg<,tSKGpYuw٪K1 Vrwpdy*x)%Qx6ZJ|>FràQr]oA/F Z_q3e\3q~a{{x&w2tI5l?/K+ mXI^*4q~268Γd.lW"qeu뭚/_\$&lͺ',rU# y&Ʀ4O7nBGnm11}4nK!B^ޭz.,䍛h+*DlǻI(#zĘN>x㝁r5wkRݺ7nld=փ]& tĻS 5ڭ=uwB޸lSRqF'Ʃ7vrzB|P|+3ꊋ X鏲mJ%Ho,J)Q~'lIFC?J0P7IX4o|T;:)w4+5{4%rd|c)JY6İ]XyP;Yr,1Iyrz!e/o_LK4x%%Sjz! O2\Ҳ>-Uk3"0IOgEO/ wUJF5/ 59Nqg#}Umdo8$d4gs\sT s4RT:e b]m !AjJ'XSa,8aXƹ6^Rg/3 Tp+7VQEN7D ID>M|.h,&2ǽ2ɼ6xF,׌ZBh)P F2sU*:s\ 71V0N7Ou>Oq}P}_E?%&M9FOkpʝq&cFtggYCJfRX,ӭ,ɹM6Bth9stԈ,O@ʈE$Y<αLg"Rc| ὲwQ%l3؝x<_!XSH%6鍅Š)Mf OqX%gΔPܻ\z+pm@=OT:ҒňoQ@FI1?c>_d;9jL+1 ɁN\2ĩ;rpe۴"Ԕ)%5ҊJEhO2N5vX͡nU\h1i#LaJ *ʩR”v]B)R nقD*c,.YY-422C`_n3ġ1÷mM 嘱ntqu$zjȵ~F~?HmJSG˄mhrInb]]X7уmJQ╻?T/I3U'G|Q&5zv;H~7>Y) >F 9 HOJ!R*7sqZKc\8n;&4zиZ}OnDR~&\ék.FڒRqi!u {*`Yc5n!Q8jOp1ηX6\*o]׃%4 /oqw އm!r6<](e-tKRѽ4qٚpt'^!oIҕ_{X[ .cv3T~B!xi%6ZCa!Wb'~Ʀ`̼v\e$F2Ṓ˳<-k*EsbڮNAHv0Nܬq*CK:e/De\8@=^rq1 jť#L k|*ᩄ47#e8% +%hޣ8SPpĩvPz-T eU %(b\U]C@| %P"lCgBUBI|&RAKњELN@x]ACZ+vcELͷ=Ud!&ZT_EP310}ZBBE(gz@*lJbFV9BZoP* =ZU[ܦ7 q'Nz>.|QHpA]:2''F^'hp{`" ŕT&\&=l{@,(b PbH"LspmCy/Vsk}s&HYUf.t6wx5Iq>#e0F^^OW)"ηsC5T*K:kE$OiA'>Z* _H?/qB6H!>zk1r]Rw.kLJ;⬗IHe#X)˽Cps V)t;E203&*>o$[de7 p\bFo'D"r𸸭oeX|҂iSb-ULoduN,ey'f 78'@hIfv +}썸c"hݥvu`*>誮!`V531C6ʺ.jI),+58(b K dDt*2 $K#Jk]u@{uCl! Qx|[Al| 8,od1Lᘩ(a V˺UYIQ#( uLZU:Y`ѻQÊ)$^JmDoQPLm*wA9YU yeH &LVU P$$xRTj, ԄlQr# hX@pRNtKKv@kĎJJjZ5tK;n1ؙR f, ^]RA<\aAcZ-_SO3s.=ϼnQC~P /BlЮ˓]`G |n>epy)_X[NS]]|)Ά6,*8ar=#ߞIC\FSIbEnоX+5gXVqMtqѧ>o2g7mᜳh߶Dž`Su\|\=Ƹ hAnA*AVAZ8Z)(;h 8ͭ!qʚ\bRHQ11wmj}GW?DzD2E|fSUF)VdXI)(oW+XUyAl oF$k=+?˨֬F3B:(&ĴzfpA]3f 59uՒd$-f4Yb*Ic|At/o@i&Xg+hw-9ZEg0B"a+$(״dLi+%V!\*UBZ]I,<^1_:uM;I}U_ߖ ݋t6t.ou>_yn4;%%5v!Y))MeDT-0krxח(ؗ__Jj>?|~V.lŇ~\\ȅ ?ߙ WtFcz8ԧ+&bܽ{wscq;_L&:[~wKa!DlJɯ#MceAXn+r$64^4B^)ٔcc 2\3%b KFt:2ϭ{mBn&WKLCG~ m& Blh*.n3Y5!C"޳',ş #v/7v* G#Ni,t-KBÜɖf4TOi+U)umR=+gP@Q ;]=¹[QH\-}5M*ɽ 't+JތK%R6Cqh8a}KGLkZaDSכ`cW0Ɓh+DhpQ5@OfWlп_= cŌn?w=X;P'o, [&E];vu,ڷ#<ΐ2ԕUȨJʰFBSWkUh+"B(4*u@>i_'29!'{xD?>Hْ+8v6 3X5\^y|;2Ý-̟.>-v`} X/T,>}|x~ww[[?{ӠƑϾk"i _w}b D^1o+C \\ib5Xlqde |V+RKz/3WD{}r6^wWH==-ƒFgl{x\DŽZ /o/$H wx3UẺ83h "-@[)-LXWu/O͓uR4^.ұzW\MS)[]=6,\| E T4'g%uQ0 5555)ܸNXl- #RW`JK]NLA*f0E]`) Ti)ܯCC(E%Q;@a},f *1 EEe-Q)g2R7pw)J[\ ,x]+7HZTMPhZr!<T2UtNdw9˥Οtϫt,$?Rk%PrڻAVl5iXTbV/"G ӟ?HJ\FzAHKgtz3^*LR7a{`כM4ʦ(|(s`3A>w/<jd'w' n),䕛hocyH ̞6G)=gڨcv`Mm7B\X!?Vb gQʮPF+fUR:ĉOϟvsJpOVO1tW(p Na[+$qTb6^Xxʴ0HЙט1AE5C@,B36NB yVceN(x=4sTvƺNeh>垱NgT4P5O!!vE:!ɻwBqĉJGF(A0C)Wq uu&:N]TlKaOqvT Wҧm)b1wQ2x#<8b”aQ\[HI,%gMOKܼ(sQOPLx~>7u[7صgIhhG _S#4PWY馗!q Jih=d3U-S553'CFeSޓƝb1K4K4^Dž1z̞;ľ>19|[CCIbN lAh,2-\㷼LL.E&!hh>&זrx:MJ):i,+XԐp9B+ނ˩d $͜x[9%ӂN'\vML%Z{gӐm" CY@KgA(C0E)T%\dSPRX 15RhIVjJi]0ZJaR V.j#xA0 ijDsԬi Za*c$7NNNKl.}.}}B+m- AK^K.B. a[kd*YiJLr(؇c5_It vYCЇ"T)Tj[RT4+n(#D`9Z#G+A>#Zbۢ`~eHqx~.IsbF57)#ldU$VVBZiL1fhsH-3R2Lz@D㮣2(4`_iP|9jJ1G Y)45%(j VJ懴2—j b'$5RZ;]BpY2t j~rG˗?\/e 0$|||vɟoaͲ(>\B8-fyO\?\B8<_A%|g˿_箘/~Z7,5.~~x띀M`=)/? s&#| Ŧ{D1E }5YE$Ҽv :($&,NgpS kE y-Ĥ6}1 )]0gO&k%ҕcBJ.KV+/.b·ɧu]WUosۥE.s0/}#33F-u]ƅӐw&|߽3J#DabDFapfŢ83ôe`qwej04$P? 
HX4!x|ZIK͇]~< ry6?KX6an l A8R%Fw{*h}iA[ugݿ֋~nC9 &M#ņ#1)^kCI Wb"1QEჳڦU\50qK]WVs bڥ'V 0S(~G.,;\ry@ âe7(fyH2+OœiA6dS"qiLdl?yٶh>JJKvqq[V@> k-~aOjsFkeON~:1gAf4b#䌡ce ܁8´^5o vYTF14":b'hHW_Y&dmãaPL]•c:w _iA5J࣍6W4+rnQJ+Dd"Č3p?0)jMm.z6Tb aca.rSbkO;4 q*#iA1sӶa"i ު|g RHpJZaڪD%m!֣1R A.?A(xp;[?w >wėkq!D>h.\kAy”0FR29g@:2{w'5r¡P<Udˤ0 p8| k'wx(5^uDX Bcх2|\)2<~ 2uԬn_5!Ѱ l lrbjp͹΍]~|n )>\=Q*<w3 yM>pH ωtb s#S\0vx `NZJP:3 }' W<]{yOmsWu85";}k~Xz_?}ݣOmo8`KU[+ch _W8Dqǰ*~'y@.J\A|>PE{8YRk !d k$1e!Mikb muEULoxY/9{ŀ'88Af$0l6$y %,n{j*ȺDPMgty.K<Ԃ9Z=O5N'wHh#NTdWum?!ֹXIEیdx6YZH"H6#V;ϴPNΞ6 Rz0]sXNa;Wykw/ZWy4Ւ@poqze#c"ARiN B4Qm=5Q(K`}b q!3J)!]H $'v:i-tW)e߀f7.Q2%q7ki7!p[,!;F"Ow%Qzn񗖁-q)'=FNb11hy+ִ[|AvK!!o\DdJnnvVJ,!;F"Ow$koMŇjE4J"]=wۥ`ԟg4IWg4T Bz瓮qm] ~p% !K5àz?!J` ]n'] !b )W1$ IWg'@ i'~Bҕ@1>??2\'~B$;C?Ar t%HO@~B'$] 3Ţz?!JA(h(d17Ќɔ,G=Y_M4Q痻OQݛRVL3c׋'zMj]tɭ7~q mg5ZcDNkԅd>>Ѫ5?AO>]&0)sļdQ|^5"|,DA,Wr*hn vT.lLߺ3ōsـCtK+/=h/?ՍRz¡k40Ĩ` b,qb{sm@ɒoe[47*Dm?܃@\bvRCLׂQHb*F+n +50Snn@ ˸݈*s WBKg8[o1OU1,~ϿDW\7Y~I]ǣ&vaG%Tb^#m8ʁx2UUGN ~U3`/5Z ꈽGڬ~ff:3OBs );|xPFJFTiBET$+Q`C1Ѐ1Trdr =c5zpETЍA@׃E;Vҭk_-Ogf;etG~ZV/#g!wmZ s+(7iŃ 3~Vjr5eWƼZ}qAtncn}B}5\h8D ZL/r"{ĨK؍'ʓ΍ũC6j0䛞5cEP4m=%JŌkg.C3F#5ù!M缥uަ üE5_Y|=ӱJPXMk\@vU^^V<:#+hh{Q2 שjy~L[2f ]\X #³xz<Zv79Qhhp=Z덂XҾ]& !YK)i )xA2$F5D2!l2qJEf?yFbEıEKb~VhQ(bKMX.qD! vrcP 0TfP f151h18+5`g4CJ"C'1S9 Ach Eb]-)y]q0zt_jn]%;E3@3YU,t[ b~e{-;sq@h<޼O6;h) H9`֌f!92GYht%e++c27*.2l,\PU0RdHanU 0 *9WZT>3 X&ә[JZ%*+K BdH"@&]%90nP.r  1. 42I/3^ƕٵH[ 'wy%-|HPC) s+ 9~}RP0(,vCzC+m,֒<#42>"kPɌqmTbe:).:"XJ9g3NNڕ `5gPF$PBS 0lW"ȉVH4,'v㕔g@02Dx2,DJ3N{pƵ 43X^% NJN04'V(ʔ¸@8ˬbU&9DB?4qkM.Sء󐽸-Cf!Lj] &C &Zn\88?Dvۆ2qMFD(ry,Sq@ zwca߻u{Ѵ 4j+5lBk$".[_pxWTS*.PBoݏ-AI%$vjk?ݗfEʞ L5(RYϫ!9:c7=;)"r3w2Rڐ9/|eybDkLtH{{X=pގyʲYjsN/2n]hSh.=ŪNٽy/q)M9\nV(~]bGR`&*ۤ^pvf, GGz/Z$лy=p)1Q)E,tO*=*[L;3HsidtId}W_Ni3'aIvYGHJ]tA X>~BžKroY?tSHž_s&^^l[%X vgwl%& W)| @q~+S4QʼnillW'`GB% H4"CZА#b*/pȵe:[~(1~,{:]Y]/><ͺXʑqAXI q?:=jt{lSvPYc}{=!ic|oV# lL߲!%MA>'>T_h<{o h ޞZ][ 8w{mWn^n%L.oٮE@zꝳ*,b-P*j6?hVVs,V+)p֬[=[.AOv]l0+ 4 6 Ο^6 Glph9(JýOkZO[c]u嶑nq+EyLp njc,U cP($!x 9L\a5J~+DEKr dfiW[B_L?,~r`P2 H-l_La;~|;> z2za9\)8DcC(LNqS88+m'7%-@7Wt=X@"n7"t`EGy WS^w{KՁ$ijBmZc"Ҁ0 6`N4Uӂ(ji(rT! -S@[*"^y9KqdPhFPM k\`ȹsHUX!IQ^k&nQohjV$A]cԪu2by =_M=40q=^%iB/C񔥱v.OQ)iSc2$KX #r-]ra% Gb#1m5!ԒK]NL|1$?D̐Ba Wx]-rwPTbK{9M> D2LZ< 'MB<(9ՀJ]` 2d"3Y #f,)2*1Fjw/0ki&%h-3AͅrN/7"7cA#BjAD1"5e,υe g\1F,c N,.䌮CC.S&g )RKU(`k(vpqL$%%YQH;#8и0$'`V[y&s#r̩#@$,` k'QknrKkΝ5?/wa Uw_lzܜ=\Xeʆ W#r7`wb4xGߡ :r 蒸ҬȪXMg,^yV5kxKh_Ei/]nb2>~ pf`_R  )M DE@1ZļEnD~ Tс V 1\T%ecK!&<+ۖ():O, }T~kPCa^ghe R7ӗlf2n÷ffчyz}uv]]" 3`57$G/;Lq06~,v0> 8m\}U:"+3Jv.R #ȏI,^dz(&-_ yۇ$WXeB۽^[|-Z[2ٙf "zwξ 5 tpT֑Sux[O \!_UYε8>J 8BN+!dVU#dfN]ѽ:˸_n_Uwn/F}Tq=ģ1?s5 p;c^eaO ;n;@(OSA(ԋ厞i̒} md̒Ғ5}:%ޥE%*Ney>ɠr);>3ƕBzZ(rRNi̢{^,FjpUq2|:[JWŭBFgBFa\%QDH~0,-u %{VݩYxJ'c< 2yc,e00:VCt j3:z^zbaN:nӀIy`Lx04pu8Ƥ8.W q\VTY +{RWn $gn=r6 Dܨ<{ΙFkT[^ʕ9Ha,hiY.:p@$*c[CN%80D< u%1$xd@)[vm(T*; PԀ eM5!~P3|*:D ǣ-֭P8оFuǺ9`j6xm Utw5W f7y,Ӻk!6ݐ3MցqdH8 l} v$裚0!nJ¬mvDq0dTwjftw4>~WO,W{c"~:ևl=br05hj QRi[g'9gk^֍12#f ^/Pu=#UɢcW"+9J/`<>f8Jv|J ee2"Lڷ ^$w4oڑ)GV 䵬^>O.ʋW/)~έVq{/0] r׾xm??51XṴY*D>2ms85o¡QZuj1a]+]YWʺz^WLMV09 245=23\(%\1z@:F|gERiU)YcTk ҆k T2̜1W0TG˝W%Ő)EQhH\U w\(A22Vx V663msATG#38CtqRҴH˙$%Q\̂m0V2̖څvT%:k:rA/+tZQG\SxO6 PιA|_Qb㶗c՛e_h{Y܏E:lx$W.|   ޴y7>}wړdfq*<]n,h Wg~]}(9]./WxD\̟oKJb-4]dZ!F^ϱ9ϖ} 풫>k qE|M3_-6ȧ-VEGFqc9,whd )0>1A;c&%YQsm7Sy+/o_^(X?Df2{$w 4TXT뮿;{O[y:a.V}+E !{0X9b?js>r:2 NdSb dԟjp`%2Tq%rGэ Usrh"OV3,[`dZ&SW)@FЁ9aLVO^) EΚAL+cKI!Vx})1 e@ ={>]7vиs53)pjP3'EG*Ou_fK6o.IwMpa~7X=-Oh&ۯPl%MŐըV`? 
C`iVѷa(?V dVM/qu#X3^L )I$dU4'$K$Rڃo6`~θfTdUDa Cm8wƬQhs EP1Ǖ)rÃE&EQZbڳ!D<LyD 8l MnF6$`(vdD (AhLi3O`f^, u sPf0;DaS޾pG@rJBKS2 }樷\ɘnnT7 ]W݊P^4! ,cvqAH9wA ܾ Zn^Iދ.w$mp[Z]p-D|h NVňCYqǵ;q-%Szג1q^Kx2iUu pƲT]9+pM em'ųY}nbyr\ j=ґbivcUq~ѼBOwe|`ͅe!XW#>C- %^ Ei-F36Či @PqVF{V}: ۪ʤL90/RUP k %VUSǪ/`W+ry.<1S^dZդ)(BɧwYq8VR\;:')2ղEkjVAiJZJPNJkHD@*/ʁk@YH)/aýx:6HhΕh4: ZsTeFVX;НK'ylLfB9,oBNX%:Πgǽڅ7X- _0=[߲#0~>D !qٴ9\EӺ;F0Φxr ԅ_ YinGڵ_3FtmnnlcO!0 .eQQIC1䄌5GFB36ӫU̝wW./l`N!2!5a=#~ BjSJ^ދ#=%7ݼ ,[r7oM ^6dniVa#$D1r^ZHN㇫wK}L:vkXW:ADtd'aX!gUhp&Ь:*{~{JFrc|A!\sÜcvǰ,4"&`0XWiu"}Аz=ؤnI]BSZkLLrrY(r!&GIk}1fo NkBB8g˜BYLKzdCo͇p(=)k4(S=9mpc{8feeѭo0 %9\N~<wnH.ӍZQ?_<( NGk;?S˖ՌG$v9oRfJvc|b_q_ UGw^v2 2R{ü=ـ*@GȝVRK6R*ix0E( ˊ~K&ڰD1'35p)lTޞ Ԡsս)"φ\J1➈r ](varF)Xf8m=K Kh+eCԼ"y(mc 5'iBRek0䑱?p!aԠp>il׼aɣD8,vD2cI}DXUZymbFJ M4v_n|iƅs?&u-iGv>Ysl Z0 +AlJb ˡP@Cep7VqsɔzxʌW't(ϭAS+!qȆLP ƃTI1^uUcڠY3v$ͯ V]=_lzʹ$^TɸL s$kfsxaAlo T<2ƷZKZyw^'?آU'NO @l)51/XPDYX`XxomϾD0VD)0%qεik ŕu Qe8@uJ(FexP L$f<ъKBZ[ӣOZy]ڢa$:ÂPg4E} qmPD.JTL iՉG!H ;+}‚yLFM7,(}x[^ M2;1().0Cڃ!g֡$-|(ZAsk2"R2(,Ռ1Fa c:<^tr%?ߣVSLh`E1f4=ǜG9fT-],f>̥PYt79y~LQJu_1UJiziж}7mOBf\ou5nS3-Mc& as+aqQ2ĵn|7aƮqVDJ_4]!>5Ip ;\{c &I8Zᆉ_^8 6Ns΍fJXKE`/;nβ=dtiGE_*i'>cD-"gdiny T.iZPFEK<-ȫH$[l#ǕRIͩ֠`C ْV_m01_wuXHz6t"ZhƩ87 %{okٲA {(g_O>ﵽvYq\+l}Lbac*~,?ʲ%|/H+ffy'X3{J ;]`uEhf X`غ$!S|OAvwn\cp)нI09'Yr'dYH ,y]r(G~a ы#Sdklolgoz~o07dXd$quIH WN[|[iۜAENG $^{ 5#-3=8X|O{>?'7O/aU7zVh7!nvZ7ޮ<|>[w8>}N Zl^Ijbz~dK6rIßބni3/wZǭvZE/5 m:N84Zc._hR]O3fe=;ڜ&mbи{rjKSzo? -Zh i][uբOb|/և1e@Ԁ$QGvZ13׮>N9j>||ڻ]/{v{tW L5!@QiO?7ԴZ@k_:>֛׻/u[[yNTqQ]UH^~z}y>K8+zVFl..;{?-?t\ϭn|Iv5ut{|v߃،ݴ ~SrAŪV"݀^_DV[|7^t6ac3 79\-sAvn bݒPE: ? u1< j Ձ@kyB֣. Kp 8aJ?8+!)qQ{ľ\0))^V;x]?_v^B@djIFL>Fdh IS*%=.X A΁ZWs qI|B{X,8rE0PXR|T [խVAuUP*nT [խVAuUP*nT [խVAuw6 *Ř4XS{yV$k->s\bPX7˙@He\.zH <虞5_cSrX'76yJ$6;zecsΓm PKmvMymwIВOvtuTk3Hn]tf:T3RO03KC枘@Fp FAзvqzK^;+0{LNTIgk$a\:6054j\4xiaM$J(hqDP)-b$R++ <`@D- #4F/|J"i;1(O2hY (40QD2)(y;&U1[Il3M9wa-7XQPGxf a/0)C25}9 q̀(&pkjWX ] +0Q|5zvC40!8 8 ()UF0jHX\Ҙ$ZK2ׁXS!Q4ch 0p4bdu60CQJ} =ha"}EԪ ƂH.Z 5V,A8![Mi#)(>$Qde`X2ib)'ķb0"vx"h(JK);kR)/wT(6o*EH(Gbs:_ $BqUAD3TH#HĂ@눁&8Sʋ +R2g Zܔi+NǪp> wz/F齬T“& 2# QF^l]֫ō ÓLP\_[P߯@;yr哝H5&AxXȓQzҝsa=c 9cxӐ'>ql6L׎悲]a0N2rljpysy3c9ܖ(>e0ÅU!m5+(n}qG:K8~/tO)7z ntlqs5f#{Lc46vչls *kY-_gk(tvayNK}?އXCadeyGZ3 Bsͭ'IkCNing73(:' 9 `D#GGĢt&&ûcBb%C51ʒGQwg]7;cWRƵ^ Cey$֚!3Sl!cqN/KR ɖM7`e!#=}ׁy].t{ExAD2H0 z.G>hX|0RsrRTeV(y<]\ACΗwE5,@XrCrw͍Ȼ!WޥtK;ͻPMɣp19@$N@hlc$s\ݿ}9%iV L^z.qot(C"(1* ]@AYRm6&fWW ?,xziӿ.#.Eqe]\rYxX+4/umٮO?UR\fh| R )5mM/:T +z&svd2Ъc\jk*\3L5LC9@c8,jS9DǽH%iޭ)ef^Rͪ*r9|fV`2zxb-s"E)ۂ og4 *և2[Z)cȭu_g"R4QHiDz\ }(fbPtEz}:j=X]zz*fȓIQ{'(5U`@ A͐R@"^E׵Ģ#ԲnMZm1xE-t\ZfAY! Srjnd\9%z fAЌ/׹ 2Rph^`<,@ڡ3.j{+n(_|y\Uh0jE h Fbׅsh4i*^y(C{) %պ91!HPiTߤ e:P\gN hS&iEwI o4C+޽*;;VOm&  k$ߤU׹xm~I3΍bcvנ-܇&_}hxe(F峓l5;ֈw%"5; 6ӈkfv636n"C=UNfclo`GSq}E;rvrxncM3YClSrSamAm!u \Jߕ{XWjvuv Ʉ?(8PJ5SMZh;T Y{G+KtVH\P?hCߤ*7`j Q:X"M4x^b9Ú7hA/pq R@L8!H.erPa3d&yIdb_x^d6Lɭ9iB Fk L$Rbgh8jlɁA ͚7h7-y5 "竷1Sx·~ALj ?rOLjmȍB{OY \kvZS5UVuv0ǰޡryK*F>:CI3S=|d=ZLUs0.j>0o$*R(EұPjc8-TY)h!LKr@>?IK:'ԛa"'I#Jd68VTTyslZVqYQs"R({V`ܒd HD1}pqm_4IN|ց9"ﱥaWS.Hr E\ʬPgXk菖$TM%ߒăE , ^*a'qh2x&НRkuCeMNk!]0x<|3Tlyq }ޯ spLx$^H -QMlip0}{gOӪ2qwǓ-05c?|n1jX(ȿN8 ;4/ Fѳsw%^?.qKnߜW7"f3*Cnߢvs&9 _WU mP꽣3AlUU ͝2[^4V rg  B6&YB Neഊ:L@j% i4$[k91&"mrвiGX]n~ cy/K1=UȐF&]1lZ;93c4& >+SOA)K#~ z4`nW5fz'nt)ӕ-TT{#j~ pi2&i)3W( `%ZH]^gt"m[?+~j6l>ˊzC3\H Is@o\Xnm0 xKR7)׌ 2s ;ZzI.' 
=,&lj4]5bvVLՁ Yu`j)3]{dw Ԛk`Ʈ?{h +^[xaq`Ž:/Y5OQslv՝2;RW5zFX "6vTǝF($4 \qb:!68e`<(НhEZa3Jb 59wBIϹ3L6LRshf~e$mZ*;a)̃ 69tP28P+|Q+NR1",[ñ?T0BP7H#{>&g4hOqyt6cAp.c|MMP{h2FfK;ZR0,{!<ͨ^Q[mv 1y38⬖\0I HqjP-[ K9HZri=};xn'Sɞr6a0|Pp&:qPc3$B>˭曥lbp΍ J *Dߝ-91#) |"Le[+ZʂO{#!4]URJ]3iuC@]H(ZFcab+ y2`*" #1Ao g8UDYM*)PøJ|m4H,lxKinh $0a%V,rI#t-KŞ 9"x0g愅`\QLF`40p4!σ@%54-ԞwyoDц`#'&Va=iڊ`5eD`VDX8cmޯ#u9JKa:8b=ִL5C ^H %Fi %vtxW#NPD-o8< T!2!# NzmJ0-EEUk[2!DKaA@/X`8ܒx &3^Ũ=Lu*0BcaY NLLO0%&Z`&Rib4kq0QaV35,=(sH9k9#N5zTg hxiQXsO@6 dh 4F8-)ߋ8J&or)M6V/?7B{VcMZ/*^Xmg|* -MZFhItVcZX+"o5Vۛu~38_h554CK/n {&o5614CK5Z#AIh ##MI3.dnD[!d&,a`5. ʜ6[lMBKFҒAS&ڂ2RRkY$39aaw^*zi [zp{P]1N\sF\)sB)sB3RhPX41)u) X1@Gii7F m F֓=@e?{կȨ{@ K%Ru P;%R"m`$rb$,s xrE/66"״7o4V\Wq&3h눤mYH@?!3oH< T)|9$XvS(*AqH@a  X; I 1\Ga ,+ 9Dv"HDŽj4tS#s\&Ő"Lqj%V4 CQ A>UIdZhIoUL߅nƽQ>$LR]i/ncA*8SMDvB[HAq9Ŝ|T uv9P΀- p>`CR 40 dHQ$tRA."AGԷ(֢Ec:j%bK:Eyx)t5'Sޅ\;˷`9#b"6U~R99;WMUՁը*2=ze{/M9a y] <3QUr2pQJ׮rYgH&ۑ#L1"Bo9^Ӥ+ӳ;Md) J( Ki&8h6Z!I^с>kx2>0)yvHPK eJ,4 |})4%5E_ILVpҘ##`Z`Jr OlP,XBb.c :cScP oBK6KРoc |``7)!O~-ʣ,wٟTXS]O립>ޚjY\Q-xXGǟzzZO< Δnx _(dҰ^xxm*(qPWbk )b&VbwFGҿyy}si7^ioP{,dHL|,O@gE}ʎ0tnR0~)ޠ\332+Oz+FYɲq\J05tYQG&6ͣgGn:{jo.(gv<BD7?y& Ctsww_gM*b?2P\9w?~ ~8??v {PO.g<U?VX_nRGjΥ̞?}yݧ?gY\n+˃l%5^ze^?9Ȣy>߳VY7"͚p1=dK OȣPS~=a@V;^M7\շfg[ 喓̺2;DUr5k9Ce> [n$ů4>CUF X/fVJ)xvvoe[0[?=^ w7)^%/;om4 v_Ԑix O'*sqBmWFNjJO;}Vb__Z{{Jœ;|>;L͓ߞT4%Zط)ҁ|a詽 {6v/'?bzU>O6L^0\LWBy)ՃTOmzzatТ }ꌆ],K탱ޘ<{ݏ'@$C8Bc޶?q, 6y{e 4W>^}wEfއ>|x4:- W^tOʐ:ZjTj"Ỏ4hٻ} yOAo^ E/r^cWk".ލ;A5gOϠS9 O;I;;`uvU?Iu йjTnӤI{?^r3< g}>`tuO?zӝWud卦H>MPZ8.V88'3_cH @˙a&d"9ǑfoIUYjZ ;Y=9 :_c-sKgml`^edfjr>gYg#Ki2lg &֭ ݪ [$v뫖SxVڿۭ o߾gzvۭlWy`2s%R1ڠs+s"4w$s<"7H<左;B[sID*.3$Xpi?62 /%PƳipԧ(%̀ m,ŞfnF מ'9E:(juLDK36~B]K2{-Ju%˘]c(LL&%<=N3)Y&Kc?b3\<lSO͚;sv$'u2q12c!|b P3JZ_ЗV~s(Ư+ӯTW+mJ]+WWJus~ŞWRtҬ_j bƯt_ivfAoJ4+ׯ$W_iW>vr,Wذ RH&_ү_Q `c^ݘW7՚W'ZNKX7֍.X'3RºP-X/m~_mJú7f֫ͬm;vֹu 󄼱K4ԺfSkԺ1nL-t: Smnvφ.ik *_@_:%Y2 yuO, ~Tg/h}BPjO | rfO8ar0&>R6s,]Uq fQ92X?dBO'󟕝ۭIw!t}7Bߝ=~6)P8"fj*KGVjwmmflhˉ}R=lR*\m%ޔi )rulQFpdj+3㣋n,qLPۓg:~1bȰE4#-mT1J .eT+c5SX6$SDfvYduȔIN }y/O:' BG' 3g=Uzv̪! , $Lȳg9UU"! 
B&*Dzrf7xe[VbtLDR`ͭɣQ)F;gB.٠bv7/ 6$2=% hU'fjme,ܼ1v*z)싉YFd$+N%,R ى*4B$dUV\ la*.iV&#(V9$zr*U;XB),M^UTp ZyЉRYpR1 O6E堎 tQAW~Y(+dFYF"O`uŒ͒{7Rt',m=#xa$EݰFg>%+r*4Ae.R"YQcY#*=وA6.b1% HJ9t(A%8[u%@?yj;BL RY]GVDNAB#:d2Z<% t&Lb Ԇl9cpufTyY%gk:rzBl`dyAԬ&0c ƤNOvp=ZޅJR4%7UIIgDy"4 9]ճ___nyZ|7KaYa4 chHZ:y7roU!bR'v&-;< XưdR1cp $@*& mzshnP;4D-Mf~g7E&}F RKVCrJ)}8k}aZ;$7fbqpNY#R;"Al}_Xe]L,EvGNmȻGWjd.`ƣ~y۴@vj_4Ğ*D sքTd!E MEu)퀾_}0guo?Sއ 3ogO}>=)9uC{;r;>KMMC~H]IrȴuNAL2oAgv.+d0{EAoBğ}9M,= o짯=.x%t3vȘ?`O:KqƳ'o'܃i2c4ޞ$4Ik[kiyϭ{'=cكW势#3΍{|~W0 KB"\^ 8^ה5d49KrF~ߢܢGQoH%8 cc;%P&[v?DȞ1\*71_֋|{vqio|=/^^\?n4^VfwtmN>L!3p[)j?/?jiu/9d$A${g/$o̎r-3.N̅!Xek-Eic RB=-/z/ջl]7fCϻtxnLtnf9=N޻x:}x;!^3~H .n 1iĦ8'6u{;rGaw7`m[)g {JzBmXR&dYK;E7 _SLj M5>dw /J?uYs}VI\nϥZt:ؾSjB\+ƤR7L* Hjh PA!"b&‰\ZvI*GT`I"A ?JEp[ƳI$Zd%zD.EΗ$j"+FjfB^)CMRӆ NNDTHo"<4r- $]pNBNEݞ똲M0zxdF0,t ߁(A I}5nsz'(a0bFEĆMjX]SO+B8!-R"EaD<'+NzxE+SsLEgWq /n k7=;~.^nqP'}AZovQslo9 V%:s6諾E.NҢ[= zzҭ&X߿Z(;[ԽOgy~2c6?JӕOX\7;JU-\V?_3f{xDzm'/ԼeCp.#-oYOiU#ݪtҭ/]X;6n)dtܵt_hQ !9)RD xV~_+*vlAp&xww[v&фlpi*.$"LmIhD12?2K]@o盡E~Q դ<+N֣V3H[@+&fC}ZSplNvX0ө;|ƒmJwx%)=w1nNY&`'1Lҡݥ?GVw`΀nÚzDbچvq"X蚋3`쐩n?9'#Ynzh7t۠ȬGfU<9f:dybQ!S'W |t6=^/oҀUN ;Ur>?9ϗ~^υt{M(9AUL.->3gհ%0 J@ԧ' CLޑ|ų}ёAZŰ̆ON؁'ޕqdBe\"LHߋ A2*&)yTskM^Hgu:Kթ0M5!+B^[=a񊱶<$:6)i^ ḐIXa8繮 '6j~a9W'RJ^#}79ޜ>CjgfsPVR(+Z ^d~|N=܀xvהk|Mύ|юov4ΓD?_ԧ1=b ߗ͜?1o6l_wJ/8^fK;|eArȺkO:֏:/(p[,U_{Y b5RxM8( )PT($")Q߯}=(N!x8h9 (a,~r0 " ^|t>Z8J<2s4-.@5fasanobyY/D`@'N !JCc4F>3kXtǩK$GyfBoX7#1BiOV-Mqw͋khp:@Kf%RJ 뿎6 w\2x#bߍFvɔMӡ/C]{#`bkqr֝Y|.Mϻ#zwf.:B=ѝ#q6b" 栢v̓ZgWmWu !5?ZʲFZoQarfѥCPr-Pjgg!_ײNj6HX95z:3Gϋ~hGâYDd,~Q|h~xfpĭN# OOjC/;VNWDʒ}|zRc|(e z2wk.~pI[T\1hS2K Jix"PSN$b')!0^9r5K`sдUzW_l^+s l/_+w={H)UEh>]W5CLb  w+ F''y;IX?d'xJ;!,ӛd%|  O0NTْ s6H︓FvP]v*8Ḥ"2M2"#8aGS[2f mhO*03.:L3:(G)yH~;Yu2 ˽ϗe4rl ]P+r'ұdOyFSp]S*"YC[wL fIeJR;n}Z Y>o/"uM\K pŷ3GA.2CVAI_Z|5ӡ {S}9ye|z}J9q7.3ȳHNw7k깅z}Vg}kuj-Ցr 3/<ĩ`H{g5\K ,bKV'Ǩg" ?t*wHwwBd{Dw-grf~r O EN ÖŰ pc60`p8iq{O_9tzmaa/uQR$8㣑D";b +i6)De* qFQblͪ< 4a`)Ȍ`G"RTdH;z} 4EM J 0i]p"OT"1kP+>^b -j+lIoMMTA#++P!Gc20"&(ΙA\&^- ^ sSZqhJSL[+"=1͵8inphT TvHs%&|=>?k4Akf :gsx7zEd\u xȉƭ|\vw%1D,B@Bv- RP'd܆̡SLz#`j.B͜GGo^p*(}_m!3ڠS EJᗱۅp ?y){ 8sMH ðb/plx;硠B %p6\d͛J"iDT8\J19(ʛDT>j*Uc&"1Z)S8RP!Rs>l3AHzv+Z 6t馎|Tʴc !m?t{3A FwUz: ϶=Ӎ ,sj 踛~,ˊ ^! /S "a犁D)nL%'!H{;e\&Q|y]6-j͹WUGƼ͞I !L%vwMJ=W_ /*کx: ["_dReM3ȮnrSqIueՋ' de\ dpk\B1&c;fw%? x7X ȃG#ٽk`W-F} ڌQiRtctFPFM8\!"|*4}I;(hHU=ip8DCjv`Щ"`FFgzK荠KH 0. АthX z㳬 Qw~ЀH^o7,>$摊LةSPCyWZxabke jy8ϒqC9eaos8͙ϴDWk.a׏]I^K \Wz]Xa&z=4b7QW; 74gj0HA"ȊojU`_t>+Fxϛ|:;q:ȻsT"LQ@uugh9IC68Âo"|q 0>Z eOq~>Xw-5d ?><_[ȯ{;͑AgYdGWV 8o8Y/:[%o[ Dv܆?*H̪WFq*$Rn=^5z;h^>,J˛ӧ}-SXG~J Κo!酞u@'8K B1sRť h;Vn1JQnwruw}Okq{xkX|nX\-kS!B JF󆷧 oO.ij}dzFőA0$`! b)ʥ^Foq4JmjhgS[, *YF*03TFqk5Z-ZЫ>PL{(];dV%.XnGy  Ou:0~* - O>r;dP2Q'l!&ea%P[i#fAV% E<`x gMRZG.W\{ $z=}\'HS7тL![_5H2F C;zZ۴zr]ôU̔f142S|r0LJϱ"q-2D[b!@>Z|MR2^?֏jq{;. 
NR٣e5zX.mjo,] IKa>5w1JC0'ϟ3F3 1Ypke[rSPPx ]x@xk<nU[%_&?TxzFCtZlpo)*:`c>IAP!j+z7d9yitjW7\.k_N>TrTh qJf 'i+mH$ŀ_ r10zHBQ<&)9<$ȃI5{5=pY;MP9)r=p_"<]sIA)2%,w1h)(W5ǭn-t_%w1d &L" a4@-#K{ޭe Pq֌޽"ޮsH0~FxA"m*dp~; g-sV?0Ae˛ O8I jU]R+.jiOg i=n{d?,ZW'ia&^QltrexbS ߔfoEgiaSE4EODI ȳZbQdކ;"{-ZWۧQqt)K6H`nS8h2;Al8JN2rNyg0%-U)%%>ofzq}e/C_ ‹.cwZjUgI啑@4Q9B^jǸV_|N},^"{^|^ }顏+AyߋJw=x[>,^DE<)Jb/>ϐP}S^#iײsl]ƪ ve9ٿX<,=szcI5K^;Ֆ-M+[Ps W{hnsM-@zj&fH\e0+V Ԝ#Q^.Dm(Y~2H@9+ h"|>!2>m`U3 H̬#֡39kA׼#G,w1h-yf3̭~ z NYf^˙y c"0f1+5 oLcgshA׼+jHYت PD`\3ݮrӇ"Z[A3q=+xg#2,F6D~Ƥ^l柎c ,n98@mcYX2XqDg+=2K;*( f|nu`@CtB) 8`ŏa!`̝|"w"f߻OAJ Y.zKv\ϯ$~Ĝ?*B "TT$W6N z,{z(8;8HR*zrr[0 Iѥ,Ry0(8 >j(E&( g''q( C Oult5wjxOPri@-B+f'RnSƞG!"]9x'j{*F961{=|Q&}KU'UI j"\w6QL~SzywӼϾ;9|Odֳ~ ;C):] sx-PB8\EQe]JvK$E1%ȉORlJz%޺$+h\Eq!9D܍o_c=Ŭnl:menGܜNj~>rߍ.@뷓ۙNJq0Z{A`Tzl>6Faf0DNb;FYwXp3jvLi*6XB8u):a(94CYpE ԛR"\@)pJq#1H||Zd\O^Bʕϯ<Ι2X_=ScnA [ܨHSel TQu' [!]+`?vqDhХ#b¦F:p j  )B2x)R)0\twZh<QGN Ja+H'ٸAH𰫭dR'NHrAaL@1 -f%y?JDwfvf|h~񶹝LgA9HČapV& 1_\WcVF.8gtDRfAbZᰎbczĤLn{A>8"|lcLu'xMЍ]A.VS}ś}v_ѳ sR^Vh;L rEI@ ЕFDg EJ(ʒ8: hb')F|mR8Ptq~A"gބ|R#%gX60#wP-1cң 7n~R tаYR~Lt Ӓ !A4e"1҆ #@EcLgƒP(#A'=i#TZu*[㞘Eʖ <\Ϛ|?ۦ7fav}:؅}t_y(Pek@;8:ĐFcBh60(*z2/4fm"SE Ni8bh>p&dt+dx`U/KPQ`ңFC7xE#l+fqtj1W03N%j EJT?+PD"rBD7'eav}:؅}&v6TJϷ^-R>hhvFW7i%tdCM|tu1=/> ?qǏ*? * w&Ta0NHF1 S--(Rk^D hCͣˬA!H/$`gE$F*djL55Hzx"-5[cyD[6,I}Y%3m+&.>ZSŹb,DʼULd5v` |d)ʈl>xǎ%]DEf !U!?$H?>ѱ-[I.g -,wQtRA ZA?Q#\d<tu" dpY{1@otQYڀ 4Jy%ћ覇z8F)6kVLQj8pp6·Ék\KpA-KW>wBK -$$22Ol:@! tV/ $S I:%_2A%%/v g-3M&0m[d+u A"GHz z圑dhAuYǴ*kSmvL}!94\0s ,Vs-9SQDeJ% Y3c' @;.fm7MnwaE6wYy6v i "}bc#qtN-wDZ"Jt&OFz< e1T bC ~%v!Q=l~n{y-ޖb(Dx1SŘ.A6Wm\NgMσyAt17l0_#K6cAV 3'0&enD5rxDw1d9)gY[/zT9@Q gZ[!b+-urvnPv#ܘ9)õl!̖"V.վ^2k϶ˤi61a uaEkMzjN ϩ%iWjCMKK4yy㥓ګ?0%&"D5JUy;yg,0~%bG`0GW?Z ڹ7׻2IG03"[fCrvT;_)HԝLƝ7=gJ':Hפ:9y/pd?V'|}Y-oVVIȓ紳$OVG閿JznZ}[mCc!pmlۆPXW-I_E>}\/E"EOΦuso4Τ~8p%F'iWYKf'J`ʫ43QNnFd0״\،`KH֥K ߀u;kF>;຺mhuݸ*9*(lE1֦Թ՞ OWǗ4}^ot[/Ky3(o3qyb[ OQ1(zR+9uctED--:/4W!,ډ4>1I׼Z(kU$:Fu L0B"<%ͩ]ͧ`vޯ`Nj/ajuּ} BLmUr 0Ng]c˫\}{u1[=,\xw/^S|MN%'jj^{ f SeIN냁 (Nq{|swM"jdK1~*i'>c$6lQ9#CS)MɖIp.7E ʴp%ʿɱݛBy !dB O Y j$X.Kb. 7Ey8^1sbCZQQ5ɭ>|ɜwLr: -b$KFt6-DkeZSd_F}zk6ntҠ22§Yr§Sr&k#Nb8&=D9o0Xb„j[Pt^0fS\x4FA1l_gɽR}C]Pr]Q\ی='cᤪCez 9Te3Mz -[3ZM% w=axՉ?"5= 1pb")rDmjk5EUd1BY:@PTYHx!U}3v(澿1.`5VDէ">LbϚ{5SXn#Nbm8i7+AL(56>Ϩ!j(R@ŴNvG͓y @DkORBl $ .Dk\< V.oxɅYWmبcMT5ĐFcBhBy !hJ Y(W$6| I"W1!B*ʌkԢ=‚\wF-GB)2^zzuS< șȓVj_d}S\V}Z8w<{G[3Rμ'^l;%;EVdrzY8֛$jI 9Ť J=`սO7G`z 0bsbr45ٛi;I%0d[{#:SLQkz1J[!{s@|%5()&jL1X`9iRGQPgPLT-j^ gH8u)2(&c=]/Y53:)sTbP(-C1&ZUhr2icUswVYOE[=z-ω;t6B{6~4O9O&!^?K"ɲ2Ms[4k'&iy,r z}CXHqU$g~j|zgg{=|Cp?0q|93M !`tH4LȼXAed'Prp=>v# 0&Թg? 
?XˇTrv [IY&Pzxb'mC̜Bb,&]/idG flyLPAˆRܽz:tQ 9w ZOֺ Ɂ$[`MNo鷸p~@ÖuvX7h=໴Ԓsn5M'*DH;YnoZd_뷓gvo Ɨ= RW,#923zt}OQ2[TpZ;9œ hl )L&,SI4BR r薾N%Ú xϜExUr R*1VA딾VqM8\Wn͕n}pg΢%<%&ݤb M-R(ou4Jb@SfPTguQmmFW(=NVXu+ZVRArHx '\(g-QTFPF0Iw*P5R{.s6nFW7]1*ua72w&!\0*[*$kx՜k]R^̠PP;T1(8 W>ŰS׬I1(8B } qh𴦭&K'}5|,ZSLଵߤP: ?W%&ؿH苯Ad3gԜBޗP,dڱ{>n}$ŭv}2O)w;S`V)/xrV t@AcՁ۰2JEN&fSb'u̥v^ ߢuޑDŽXРP6&jVpzpKX|N+s?{:v?mÑ>=evqœ^Bner<_Qnz/YRi \c J7ne&"Sfvj$Z?5\KfB[-<+,nW&3yof phhfDoݾ#d,6%oW?WCa#9ώqE@nꄁIy ?^jYS=lA("jm+Vs3̺S7>EHN@N)#M^Xñե1"rR xc;8MIjC<:8=JEJi]u*gX-E1hJ鎫U=j!̧zKƿKLauQiռoaLٔ6\JYoRj ^՜b/\y@Zj iɪǶ?g"f}ag<>dqRZG8bk߽'z 67k 8FQjH\s-?yh-O?.Xr~2YˊV >*ߧ:u"'kN7^ʚD$8smQU'v#xU-1Qx}sjp4+:<jV.D 1tz8k.8dBzպ{CG*Pz KZpeahΆ KdrRs]800.FBqV\j'$>|ty0<*A̵:m\ž߀f`W-v}~ b _].ٽ'd>0#laЎRGw0@>:ᗷqVrFZӎt+q{k~7vbfoL>oVog9zycJR-kDdeVbdJD,XsLH,RR@diRvn38 uVT*cs,Vw97h%w:>]O`[~5IgkdX=}ZK$"F#3G~$ƈT[0?2Z,G趥ȮonY]>صvIm/ex϶^DKh712eneQ~A[XTGaUR?ai]qÄZUƫYOC16Rh!8FK`b%RrUC*K9"%^共zuϧC{\wa4$C]pQ ir~Q1+v'{Ov  l0}p7 7Ǐvu.߮sIvʦlyg^ܓ7޺j+ro,Eۅ뛻[zm6%Q1PAV*PMǗ7M-Շf`? ~Hc77_]ҤV~v9Ⱦi-B|X¼+d;Ѹ%2{X& >\>0Q.6JL;˧o\7ĢyR՟TЏa)J{.)m]y/˴;^p LFb`d8МH' HA 8ы_JjeVN-smMNCNxƨ!?RKmo̐!}wASiৈBzo@9m Y8,Ҥ(R[f 3k<D2Ae^\\Sō #|z9vYeaG,&b4z',RF81L6* 2 u)`tv4x3`vk&g7ح0>kJ]8gXS$8#t |#ў['#5f ,sk'LrR^.@N Pߡm~ x_J࢈+M ὊNq…VA=EtBp%诬Tuk|S}mu- * >h-*Z(Z` I0DZV V[(5[WkK2儈˚OO|Y^Փ(i#u0an.X" Ky0@|P9=VYh~_B'=4N-3Y@' d?:g=gS@(Ь`4+Z|Mbt*# MRFXHMH!Mm$ cuBk8hנf8 h]΃鍅Fx-a#5F;- 3zyg8(4AJTSFus;Nl4y%FlZAN_w:tLNeV ::YX+?,GăXW.0 d='FHmI.O*8MN`IL1d4|QX$ՒtlVGst`j1pMٻ9n w^BYw/&VٕN=yq$-tA/8)հ@ā|ޔ pU<Z$88i51-@JЈ 3v[Ec3*c1"^ΔU !PZ2IT%ރd2MɅyz Hc0^daHDH/wܫ;כYΰ1fK$s|Ŷ c~SzOZnfapJۤ31+gғ7-asa&n תR`,g&Cc“O+2 L^(-JO>䳓O"ۆbʝp(gaj՜JL^43jQcC׼b$8f G|J&F9kE-o68JP'ZsԥPd^0@6H 0tykihwK CꉀEkia>ŀnCg&CIJVsmRBPAƈv&;CɃ'` WqNm੭5Μ1u)=ap 929tmRznHj}a* 6 1N谔uEI0,mD0Mnb*Mcl/8HƘ8It|[I$yn I*H^v(|G9郉5#RU)jJkwj>N9tXVBNi݌ő:#2X:Ig%,aWiD-s쮨Eb *$03eHspX9+SݡϱjI5>gđHC.:bQTP+8 )+d-[8Ih#_Y38O>~0pT~I˩oΫiq#|96ͪs(\f,%NRb‘7,IV\#~A҃v gw87~ <*T./P%MaSx1?%Ko}dVNֆCPOy1 lNh mt,cTƔr\Hl0b/a%QlZvw% `\F#) A*im2)ai-SƄR࣓(x/9Tqfgc^N=3&V?-zu8j(+z.)ӹŠ~@ig_=H; /Ѭ5ϑvcfvA햋A}vABEɴ[DS[+_9k:wƵTa=lbƛXx6tZsmMa$S}v1EgULttVdPD=> H} θ\(ke&z|߿e|QV )~*<çv/OO6Ola-J$C q;yZ%1v9|:qp˫7+Y˱~I#+nvHrIsE#>b${<\73̓C70.GO`;G p `v9;3QN}ֺ)V{$|z1 ub+۶^sض|ݬڨ?}ybYQ.k 0‘3L?v?g)ǙW7fVklB璐>f_{Z3]jGr嶝d[ ^3WGi{)iy,۔֤}){O/0Ll}_Zew+hTP⹊p~ˉs*0> nBl(. 
Feb 27 16:23:56 crc systemd[1]: Starting Kubernetes Kubelet...
Feb 27 16:23:56 crc restorecon[4740]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c4,c17 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 27 16:23:56 crc restorecon[4740]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Feb 27 16:23:56 crc 
restorecon[4740]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 
Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c15 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:56 crc restorecon[4740]: 
/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 27 16:23:56 crc 
restorecon[4740]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 27 16:23:56 
crc restorecon[4740]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 27 16:23:56 crc restorecon[4740]: 
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 27 16:23:56 crc restorecon[4740]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 27 16:23:57 crc 
restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 
16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 27 16:23:57 crc 
restorecon[4740]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 
27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c466,c972 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
[... repeated restorecon[4740] "not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16" records at Feb 27 16:23:57 for the extracted CA trust files under /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem: tls-ca-bundle.pem, email-ca-bundle.pem, objsign-ca-bundle.pem, the directory-hash/ directory and its entries, i.e. the OpenSSL hash links (2ae6433e.0, fde84897.0, and similar .0 files) and the extracted CA certificates, including AAA_Certificate_Services.pem, ACCVRAIZ1.pem, AC_RAIZ_FNMT-RCM*.pem, Actalis_Authentication_Root_CA.pem, AffirmTrust_*.pem, Amazon_Root_CA_1-4.pem, ANF_Secure_Server_Root_CA.pem, Atos_TrustedRoot_*.pem, Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem, Baltimore_CyberTrust_Root.pem, BJCA_Global_Root_CA1/CA2.pem, Buypass_Class_2/3_Root_CA.pem, CA_Disig_Root_R2.pem, Certainly_Root_R1/E1.pem, Certigna*.pem, Certum_*.pem, CFCA_EV_ROOT.pem, COMODO_*.pem, CommScope_Public_Trust_*.pem, D-TRUST_*.pem, DigiCert_*.pem, Entrust.net_Certification_Authority__2048_.pem, Entrust_Root_Certification_Authority*.pem, FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem, GDCA_TrustAUTH_R5_ROOT.pem, GLOBALTRUST_2020.pem, GlobalSign*.pem, Go_Daddy_*.pem, GTS_Root_R1-R4.pem, HARICA_TLS_ECC/RSA_Root_CA_2021.pem, Hellenic_Academic_and_Research_Institutions_*RootCA_2015.pem, ISRG_Root_X1.pem, SecureSign_RootCA11.pem, Sectigo_Public_Server_Authentication_Root_E46.pem, plus the cluster-specific openshift-service-serving-signer_1740288168.pem and ingress-operator_1740288202.pem ...]
Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 
Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c318,c553 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc 
restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc 
restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 
16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc 
restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 27 16:23:57 crc restorecon[4740]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 27 16:23:57 crc restorecon[4740]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 27 16:23:57 crc restorecon[4740]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Feb 27 16:23:58 crc kubenswrapper[4751]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Feb 27 16:23:58 crc kubenswrapper[4751]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Feb 27 16:23:58 crc kubenswrapper[4751]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Feb 27 16:23:58 crc kubenswrapper[4751]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Feb 27 16:23:58 crc kubenswrapper[4751]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Feb 27 16:23:58 crc kubenswrapper[4751]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.270061 4751 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277074 4751 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277104 4751 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277114 4751 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277123 4751 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277133 4751 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277142 4751 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277152 4751 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277160 4751 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277168 4751 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277176 4751 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277184 4751 feature_gate.go:330] unrecognized feature gate: GatewayAPI Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277193 4751 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277200 4751 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277208 4751 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277215 4751 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277222 4751 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277231 4751 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277239 4751 feature_gate.go:330] unrecognized feature gate: InsightsConfig Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277247 4751 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277255 4751 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277266 4751 feature_gate.go:353] 
Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277285 4751 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277294 4751 feature_gate.go:330] unrecognized feature gate: PinnedImages Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277303 4751 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277312 4751 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277320 4751 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277328 4751 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277336 4751 feature_gate.go:330] unrecognized feature gate: OVNObservability Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277344 4751 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277351 4751 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277359 4751 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277369 4751 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277377 4751 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277386 4751 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277394 4751 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277426 4751 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277434 4751 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277445 4751 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277457 4751 feature_gate.go:330] unrecognized feature gate: PlatformOperators Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277465 4751 feature_gate.go:330] unrecognized feature gate: SignatureStores Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277473 4751 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277484 4751 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277493 4751 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277502 4751 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277510 4751 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277518 4751 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277526 4751 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277534 4751 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277542 4751 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277550 4751 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277557 4751 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277565 4751 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277572 4751 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277580 4751 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277588 4751 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277596 4751 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277604 4751 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277612 4751 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277619 4751 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277627 4751 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277634 4751 feature_gate.go:330] unrecognized feature gate: Example Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277642 4751 feature_gate.go:330] unrecognized feature gate: NewOLM Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277649 4751 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277657 4751 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277664 4751 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277674 4751 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277681 4751 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277689 4751 feature_gate.go:330] 
unrecognized feature gate: ClusterAPIInstall Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277697 4751 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277705 4751 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.277715 4751 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278577 4751 flags.go:64] FLAG: --address="0.0.0.0" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278602 4751 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278621 4751 flags.go:64] FLAG: --anonymous-auth="true" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278633 4751 flags.go:64] FLAG: --application-metrics-count-limit="100" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278645 4751 flags.go:64] FLAG: --authentication-token-webhook="false" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278654 4751 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278666 4751 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278707 4751 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278717 4751 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278727 4751 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278738 4751 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278748 4751 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278757 4751 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278767 4751 flags.go:64] FLAG: --cgroup-root="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278775 4751 flags.go:64] FLAG: --cgroups-per-qos="true" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278784 4751 flags.go:64] FLAG: --client-ca-file="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278793 4751 flags.go:64] FLAG: --cloud-config="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278803 4751 flags.go:64] FLAG: --cloud-provider="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278811 4751 flags.go:64] FLAG: --cluster-dns="[]" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278822 4751 flags.go:64] FLAG: --cluster-domain="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278830 4751 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278839 4751 flags.go:64] FLAG: --config-dir="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278848 4751 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278858 4751 flags.go:64] FLAG: --container-log-max-files="5" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278869 4751 flags.go:64] FLAG: --container-log-max-size="10Mi" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 
16:23:58.278878 4751 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278887 4751 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278897 4751 flags.go:64] FLAG: --containerd-namespace="k8s.io" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278906 4751 flags.go:64] FLAG: --contention-profiling="false" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278915 4751 flags.go:64] FLAG: --cpu-cfs-quota="true" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278924 4751 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278934 4751 flags.go:64] FLAG: --cpu-manager-policy="none" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278943 4751 flags.go:64] FLAG: --cpu-manager-policy-options="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278954 4751 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278963 4751 flags.go:64] FLAG: --enable-controller-attach-detach="true" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278974 4751 flags.go:64] FLAG: --enable-debugging-handlers="true" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278982 4751 flags.go:64] FLAG: --enable-load-reader="false" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.278991 4751 flags.go:64] FLAG: --enable-server="true" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279000 4751 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279012 4751 flags.go:64] FLAG: --event-burst="100" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279022 4751 flags.go:64] FLAG: --event-qps="50" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279032 4751 flags.go:64] FLAG: --event-storage-age-limit="default=0" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279041 4751 flags.go:64] FLAG: --event-storage-event-limit="default=0" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279049 4751 flags.go:64] FLAG: --eviction-hard="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279060 4751 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279069 4751 flags.go:64] FLAG: --eviction-minimum-reclaim="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279078 4751 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279087 4751 flags.go:64] FLAG: --eviction-soft="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279096 4751 flags.go:64] FLAG: --eviction-soft-grace-period="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279105 4751 flags.go:64] FLAG: --exit-on-lock-contention="false" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279114 4751 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279123 4751 flags.go:64] FLAG: --experimental-mounter-path="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279131 4751 flags.go:64] FLAG: --fail-cgroupv1="false" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279140 4751 flags.go:64] FLAG: --fail-swap-on="true" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279150 4751 flags.go:64] FLAG: --feature-gates="" Feb 27 16:23:58 crc 
kubenswrapper[4751]: I0227 16:23:58.279161 4751 flags.go:64] FLAG: --file-check-frequency="20s" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279171 4751 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279180 4751 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279189 4751 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279198 4751 flags.go:64] FLAG: --healthz-port="10248" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279208 4751 flags.go:64] FLAG: --help="false" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279217 4751 flags.go:64] FLAG: --hostname-override="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279226 4751 flags.go:64] FLAG: --housekeeping-interval="10s" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279236 4751 flags.go:64] FLAG: --http-check-frequency="20s" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279245 4751 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279254 4751 flags.go:64] FLAG: --image-credential-provider-config="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279263 4751 flags.go:64] FLAG: --image-gc-high-threshold="85" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279273 4751 flags.go:64] FLAG: --image-gc-low-threshold="80" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279283 4751 flags.go:64] FLAG: --image-service-endpoint="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279293 4751 flags.go:64] FLAG: --kernel-memcg-notification="false" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279303 4751 flags.go:64] FLAG: --kube-api-burst="100" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279312 4751 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279322 4751 flags.go:64] FLAG: --kube-api-qps="50" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279330 4751 flags.go:64] FLAG: --kube-reserved="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279339 4751 flags.go:64] FLAG: --kube-reserved-cgroup="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279347 4751 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279358 4751 flags.go:64] FLAG: --kubelet-cgroups="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279367 4751 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279376 4751 flags.go:64] FLAG: --lock-file="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279384 4751 flags.go:64] FLAG: --log-cadvisor-usage="false" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279393 4751 flags.go:64] FLAG: --log-flush-frequency="5s" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279431 4751 flags.go:64] FLAG: --log-json-info-buffer-size="0" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279449 4751 flags.go:64] FLAG: --log-json-split-stream="false" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279461 4751 flags.go:64] FLAG: --log-text-info-buffer-size="0" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279473 4751 flags.go:64] FLAG: --log-text-split-stream="false" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 
16:23:58.279484 4751 flags.go:64] FLAG: --logging-format="text" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279495 4751 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279508 4751 flags.go:64] FLAG: --make-iptables-util-chains="true" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279519 4751 flags.go:64] FLAG: --manifest-url="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279531 4751 flags.go:64] FLAG: --manifest-url-header="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279544 4751 flags.go:64] FLAG: --max-housekeeping-interval="15s" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279553 4751 flags.go:64] FLAG: --max-open-files="1000000" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279564 4751 flags.go:64] FLAG: --max-pods="110" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279574 4751 flags.go:64] FLAG: --maximum-dead-containers="-1" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279583 4751 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279592 4751 flags.go:64] FLAG: --memory-manager-policy="None" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279601 4751 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279610 4751 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279619 4751 flags.go:64] FLAG: --node-ip="192.168.126.11" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279628 4751 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279648 4751 flags.go:64] FLAG: --node-status-max-images="50" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279657 4751 flags.go:64] FLAG: --node-status-update-frequency="10s" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279666 4751 flags.go:64] FLAG: --oom-score-adj="-999" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279675 4751 flags.go:64] FLAG: --pod-cidr="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279684 4751 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279698 4751 flags.go:64] FLAG: --pod-manifest-path="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279707 4751 flags.go:64] FLAG: --pod-max-pids="-1" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279716 4751 flags.go:64] FLAG: --pods-per-core="0" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279725 4751 flags.go:64] FLAG: --port="10250" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279734 4751 flags.go:64] FLAG: --protect-kernel-defaults="false" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279743 4751 flags.go:64] FLAG: --provider-id="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279752 4751 flags.go:64] FLAG: --qos-reserved="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279762 4751 flags.go:64] FLAG: --read-only-port="10255" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279771 4751 flags.go:64] FLAG: --register-node="true" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279780 
4751 flags.go:64] FLAG: --register-schedulable="true" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279789 4751 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279804 4751 flags.go:64] FLAG: --registry-burst="10" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279814 4751 flags.go:64] FLAG: --registry-qps="5" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279822 4751 flags.go:64] FLAG: --reserved-cpus="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279831 4751 flags.go:64] FLAG: --reserved-memory="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279853 4751 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279862 4751 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279872 4751 flags.go:64] FLAG: --rotate-certificates="false" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279880 4751 flags.go:64] FLAG: --rotate-server-certificates="false" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279889 4751 flags.go:64] FLAG: --runonce="false" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279898 4751 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279907 4751 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279916 4751 flags.go:64] FLAG: --seccomp-default="false" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279926 4751 flags.go:64] FLAG: --serialize-image-pulls="true" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279934 4751 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279943 4751 flags.go:64] FLAG: --storage-driver-db="cadvisor" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279952 4751 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279962 4751 flags.go:64] FLAG: --storage-driver-password="root" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279971 4751 flags.go:64] FLAG: --storage-driver-secure="false" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279980 4751 flags.go:64] FLAG: --storage-driver-table="stats" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279989 4751 flags.go:64] FLAG: --storage-driver-user="root" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.279997 4751 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.280007 4751 flags.go:64] FLAG: --sync-frequency="1m0s" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.280016 4751 flags.go:64] FLAG: --system-cgroups="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.280025 4751 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.280038 4751 flags.go:64] FLAG: --system-reserved-cgroup="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.280047 4751 flags.go:64] FLAG: --tls-cert-file="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.280055 4751 flags.go:64] FLAG: --tls-cipher-suites="[]" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.280066 4751 flags.go:64] FLAG: --tls-min-version="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 
16:23:58.280074 4751 flags.go:64] FLAG: --tls-private-key-file="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.280083 4751 flags.go:64] FLAG: --topology-manager-policy="none" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.280092 4751 flags.go:64] FLAG: --topology-manager-policy-options="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.280101 4751 flags.go:64] FLAG: --topology-manager-scope="container" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.280111 4751 flags.go:64] FLAG: --v="2" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.280122 4751 flags.go:64] FLAG: --version="false" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.280133 4751 flags.go:64] FLAG: --vmodule="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.280143 4751 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.280153 4751 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280384 4751 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280427 4751 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280438 4751 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280446 4751 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280456 4751 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280464 4751 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280475 4751 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280483 4751 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280490 4751 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280498 4751 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280506 4751 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280514 4751 feature_gate.go:330] unrecognized feature gate: PinnedImages Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280522 4751 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280530 4751 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280538 4751 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280545 4751 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280553 4751 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280561 4751 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Feb 27 16:23:58 crc 
kubenswrapper[4751]: W0227 16:23:58.280569 4751 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280576 4751 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280584 4751 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280591 4751 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280599 4751 feature_gate.go:330] unrecognized feature gate: SignatureStores Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280607 4751 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280615 4751 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280622 4751 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280630 4751 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280638 4751 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280645 4751 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280653 4751 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280660 4751 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280669 4751 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280677 4751 feature_gate.go:330] unrecognized feature gate: OVNObservability Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280685 4751 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280693 4751 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280700 4751 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280708 4751 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280716 4751 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280724 4751 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280732 4751 feature_gate.go:330] unrecognized feature gate: GatewayAPI Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280740 4751 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280747 4751 feature_gate.go:330] unrecognized feature gate: NewOLM Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280755 4751 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280772 4751 feature_gate.go:330] unrecognized feature gate: InsightsConfig Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 
16:23:58.280780 4751 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280788 4751 feature_gate.go:330] unrecognized feature gate: PlatformOperators Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280795 4751 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280803 4751 feature_gate.go:330] unrecognized feature gate: Example Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280811 4751 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280818 4751 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280826 4751 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280834 4751 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280842 4751 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280850 4751 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280858 4751 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280865 4751 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280873 4751 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280880 4751 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280888 4751 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280896 4751 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280903 4751 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280911 4751 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280921 4751 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280930 4751 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280938 4751 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280946 4751 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280954 4751 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280962 4751 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280973 4751 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280984 4751 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.280993 4751 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.281016 4751 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.291804 4751 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.291833 4751 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.291956 4751 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.291969 4751 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.291979 4751 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.291989 4751 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.291997 4751 feature_gate.go:330] unrecognized feature gate: PlatformOperators Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292008 4751 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292021 4751 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292030 4751 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292039 4751 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292047 4751 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292055 4751 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292066 4751 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292076 4751 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292084 4751 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292112 4751 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292120 4751 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292129 4751 feature_gate.go:330] unrecognized feature gate: InsightsConfig Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292138 4751 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292146 4751 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292156 4751 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292166 4751 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292176 4751 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292186 4751 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292195 4751 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292205 4751 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292213 4751 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292221 4751 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292230 4751 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292237 4751 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292245 4751 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292252 4751 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292260 4751 feature_gate.go:330] unrecognized feature gate: Example Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292268 4751 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292276 4751 feature_gate.go:330] unrecognized feature gate: PinnedImages Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292285 4751 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292293 4751 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292300 4751 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 
16:23:58.292308 4751 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292316 4751 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292323 4751 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292331 4751 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292338 4751 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292346 4751 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292354 4751 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292361 4751 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292369 4751 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292377 4751 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292385 4751 feature_gate.go:330] unrecognized feature gate: OVNObservability Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292393 4751 feature_gate.go:330] unrecognized feature gate: SignatureStores Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292426 4751 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292465 4751 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292474 4751 feature_gate.go:330] unrecognized feature gate: GatewayAPI Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292484 4751 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292493 4751 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292501 4751 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292510 4751 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292518 4751 feature_gate.go:330] unrecognized feature gate: NewOLM Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292526 4751 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292537 4751 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292547 4751 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292556 4751 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292565 4751 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292574 4751 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292582 4751 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292590 4751 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292598 4751 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292606 4751 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292613 4751 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292621 4751 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292629 4751 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292638 4751 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.292650 4751 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292868 4751 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292882 4751 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292892 4751 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292900 4751 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292908 4751 feature_gate.go:330] unrecognized feature gate: PinnedImages Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292917 4751 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292925 4751 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292933 4751 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292941 4751 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292953 4751 feature_gate.go:353] Setting GA feature gate 
ValidatingAdmissionPolicy=true. It will be removed in a future release. Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292964 4751 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292973 4751 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292982 4751 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.292992 4751 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293001 4751 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293009 4751 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293017 4751 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293025 4751 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293033 4751 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293040 4751 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293048 4751 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293056 4751 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293064 4751 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293071 4751 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293078 4751 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293088 4751 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293098 4751 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293106 4751 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293115 4751 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293124 4751 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293132 4751 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293139 4751 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293147 4751 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293155 4751 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293163 4751 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293171 4751 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293179 4751 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293188 4751 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293196 4751 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293204 4751 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293211 4751 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293219 4751 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293227 4751 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293235 4751 feature_gate.go:330] unrecognized feature gate: InsightsConfig Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293243 4751 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293251 4751 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293259 4751 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293266 4751 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293274 4751 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293284 4751 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293293 4751 feature_gate.go:330] unrecognized feature gate: OVNObservability Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293301 4751 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293308 4751 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293316 4751 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293324 4751 feature_gate.go:330] unrecognized feature gate: GatewayAPI Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293332 4751 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293339 4751 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293346 4751 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293354 4751 feature_gate.go:330] unrecognized feature gate: PlatformOperators Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293362 4751 feature_gate.go:330] unrecognized feature gate: Example Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293370 4751 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293378 4751 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293385 4751 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293396 4751 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293430 4751 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293439 4751 feature_gate.go:330] unrecognized feature gate: NewOLM Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293447 4751 feature_gate.go:330] unrecognized feature gate: SignatureStores Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293455 4751 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293462 4751 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293470 4751 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.293479 4751 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.293491 4751 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.293685 4751 server.go:940] "Client rotation is on, will bootstrap in background" Feb 27 16:23:58 crc kubenswrapper[4751]: E0227 16:23:58.297846 4751 bootstrap.go:266] "Unhandled Error" err="part of the existing bootstrap client certificate in /var/lib/kubelet/kubeconfig is expired: 2026-02-24 05:52:08 +0000 UTC" logger="UnhandledError" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.303596 4751 bootstrap.go:101] "Use the bootstrap credentials to request a cert, and set kubeconfig to point to the certificate dir" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.303738 4751 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". 
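[annotation] The bootstrap error above ("part of the existing bootstrap client certificate in /var/lib/kubelet/kubeconfig is expired") is simply the kubelet comparing the client certificate's NotAfter timestamp against the current time before falling back to the bootstrap credentials. A minimal standalone check of the same kind, reading the rotated pair named in the last log entry, might look like the sketch below. This is an illustration only, not the kubelet's own code; the file path is taken from the log, everything else is stdlib.

// certcheck.go: print the expiry of the kubelet client certificate pair
// loaded in the log entry above. Illustrative sketch, not kubelet code.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
	"time"
)

func main() {
	data, err := os.ReadFile("/var/lib/kubelet/pki/kubelet-client-current.pem")
	if err != nil {
		log.Fatal(err)
	}
	// The file holds certificate and key concatenated; inspect the first CERTIFICATE block.
	for block, rest := pem.Decode(data); block != nil; block, rest = pem.Decode(rest) {
		if block.Type != "CERTIFICATE" {
			continue
		}
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("subject=%s notAfter=%s expired=%v\n",
			cert.Subject, cert.NotAfter.Format(time.RFC3339), time.Now().After(cert.NotAfter))
		return
	}
	log.Fatal("no CERTIFICATE block found")
}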
Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.305527 4751 server.go:997] "Starting client certificate rotation" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.305615 4751 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.306434 4751 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.335083 4751 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Feb 27 16:23:58 crc kubenswrapper[4751]: E0227 16:23:58.337534 4751 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.64:6443: connect: connection refused" logger="UnhandledError" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.338092 4751 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.357548 4751 log.go:25] "Validated CRI v1 runtime API" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.406234 4751 log.go:25] "Validated CRI v1 image API" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.408180 4751 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.412715 4751 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2026-02-27-16-17-47-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.412741 4751 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}] Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.427465 4751 manager.go:217] Machine: {Timestamp:2026-02-27 16:23:58.42485324 +0000 UTC m=+0.571867707 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654120448 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:b400131a-a657-46e7-ab90-a8b42c88e909 BootID:0e60b9e9-7743-4d82-b22f-dfb39efa49a2 Filesystems:[{Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 
HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827060224 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:4f:23:f3 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:4f:23:f3 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:d1:91:9e Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:d0:c3:d2 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:2d:fd:06 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:a2:e9:15 Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:f1:a0:6b Speed:-1 Mtu:1496} {Name:eth10 MacAddress:56:3b:c0:49:7f:23 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:96:3b:bf:2e:af:33 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654120448 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: 
DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.427677 4751 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.427785 4751 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.428963 4751 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.429140 4751 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.429172 4751 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.429376 4751 topology_manager.go:138] "Creating topology manager with none policy" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.429387 4751 container_manager_linux.go:303] "Creating device plugin manager" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.429863 4751 manager.go:142] "Creating Device Plugin manager" 
path="/var/lib/kubelet/device-plugins/kubelet.sock" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.429894 4751 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.430310 4751 state_mem.go:36] "Initialized new in-memory state store" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.430390 4751 server.go:1245] "Using root directory" path="/var/lib/kubelet" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.433540 4751 kubelet.go:418] "Attempting to sync node with API server" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.433560 4751 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.433576 4751 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.433591 4751 kubelet.go:324] "Adding apiserver pod source" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.433607 4751 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.439035 4751 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1" Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.439011 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.64:6443: connect: connection refused Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.439027 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.64:6443: connect: connection refused Feb 27 16:23:58 crc kubenswrapper[4751]: E0227 16:23:58.439118 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.64:6443: connect: connection refused" logger="UnhandledError" Feb 27 16:23:58 crc kubenswrapper[4751]: E0227 16:23:58.439125 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.64:6443: connect: connection refused" logger="UnhandledError" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.439976 4751 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". 
Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.441995 4751 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.443537 4751 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.443562 4751 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.443575 4751 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.443583 4751 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.443595 4751 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.443602 4751 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.443609 4751 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.443668 4751 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.443683 4751 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.443691 4751 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.443702 4751 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.443710 4751 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.445778 4751 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.446172 4751 server.go:1280] "Started kubelet" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.450496 4751 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.450883 4751 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.451023 4751 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.452091 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.64:6443: connect: connection refused Feb 27 16:23:58 crc systemd[1]: Started Kubernetes Kubelet. 
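[annotation] Once "Starting to listen" appears, the kubelet is serving its HTTPS API on 0.0.0.0:10250 and the podresources gRPC API on the unix socket named above. A quick local reachability probe of the HTTPS port is sketched below; it skips TLS verification because the serving certificate is node-local, and depending on the cluster's authentication settings an anonymous request may return 401 rather than "ok". The /healthz path and 127.0.0.1 address are assumptions for illustration; only the port comes from the log.

// probe.go: confirm the kubelet HTTPS listener announced above is answering.
// Sketch only; expect 200 or 401 depending on anonymous-auth settings.
package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"net/http"
)

func main() {
	client := &http.Client{
		Transport: &http.Transport{
			// Node-local, self-signed serving cert: skip verification for this probe.
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get("https://127.0.0.1:10250/healthz")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("kubelet answered with HTTP status:", resp.StatusCode)
}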
Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.453607 4751 server.go:460] "Adding debug handlers to kubelet server" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.454356 4751 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.454390 4751 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Feb 27 16:23:58 crc kubenswrapper[4751]: E0227 16:23:58.454752 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.454806 4751 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.454834 4751 volume_manager.go:287] "The desired_state_of_world populator starts" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.454862 4751 volume_manager.go:289] "Starting Kubelet Volume Manager" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.455795 4751 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.456020 4751 factory.go:55] Registering systemd factory Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.456184 4751 factory.go:221] Registration of the systemd container factory successfully Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.461657 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.64:6443: connect: connection refused Feb 27 16:23:58 crc kubenswrapper[4751]: E0227 16:23:58.461881 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.64:6443: connect: connection refused" logger="UnhandledError" Feb 27 16:23:58 crc kubenswrapper[4751]: E0227 16:23:58.462063 4751 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.64:6443: connect: connection refused" interval="200ms" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.462729 4751 factory.go:153] Registering CRI-O factory Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.462776 4751 factory.go:221] Registration of the crio container factory successfully Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.462812 4751 factory.go:103] Registering Raw factory Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.462833 4751 manager.go:1196] Started watching for new ooms in manager Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.463778 4751 manager.go:319] Starting recovery of all containers Feb 27 16:23:58 crc kubenswrapper[4751]: E0227 16:23:58.464702 4751 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.64:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.189827183e2d5382 default 0 0001-01-01 00:00:00 
+0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.446146434 +0000 UTC m=+0.593160881,LastTimestamp:2026-02-27 16:23:58.446146434 +0000 UTC m=+0.593160881,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472136 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472205 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472231 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472251 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472269 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472287 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472458 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472481 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472504 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472522 4751 reconstruct.go:130] 
"Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472540 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472557 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472575 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472598 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472615 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472635 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472654 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472671 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472688 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472705 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472725 4751 reconstruct.go:130] "Volume is marked as uncertain and added 
into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472742 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472760 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472780 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472797 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472874 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472896 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472916 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472933 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472952 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472969 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.472986 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" 
pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473005 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473022 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473043 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473061 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473079 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473097 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473114 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473132 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473148 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473167 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473187 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473208 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473229 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473247 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473268 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473288 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473309 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473328 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473348 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473368 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473392 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473516 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473538 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473558 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473576 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473768 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473785 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473804 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473821 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473840 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473857 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473874 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473893 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473913 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473930 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473948 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473965 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.473984 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474002 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474020 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474048 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474066 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474084 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474102 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" 
volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474121 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474140 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474158 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474177 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474196 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474224 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474242 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474260 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474278 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474296 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474316 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" 
volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474334 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474351 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474368 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474387 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474723 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474751 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474771 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474788 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474844 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474864 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474882 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" 
volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474899 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474916 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474932 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474950 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474967 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.474986 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.475010 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.475030 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.475049 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.475067 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.475086 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" 
volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.475109 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.475129 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.481314 4751 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.481470 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.481517 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.481568 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.481608 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.481655 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.481682 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.481722 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.481751 4751 reconstruct.go:130] "Volume is 
marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.481780 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.481819 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.481847 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.481880 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.481919 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.481947 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.481983 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.484728 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.484790 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.484811 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.484842 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the 
actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.484865 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.484888 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.484902 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.484916 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.484935 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.484950 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.484964 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.484983 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.484996 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.485037 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.486330 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.486388 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.486453 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.486480 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.486501 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.486529 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.486551 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.486575 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.486604 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.486625 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.486691 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.486752 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" 
volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.486782 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.486810 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.486834 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.486861 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.486882 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.486905 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.486933 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.486957 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.486984 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487007 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487041 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487068 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487089 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487120 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487142 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487162 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487191 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487213 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487242 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487264 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487291 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487328 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487352 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487375 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487458 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487486 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487515 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487537 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487558 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487588 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487610 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487637 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487657 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" 
volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487677 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487704 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487725 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487753 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487776 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487798 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487826 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487847 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487874 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487896 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487915 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" 
volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487943 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487964 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.487990 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.488012 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.488033 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.488058 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.488077 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.488104 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.488127 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.488149 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.488175 4751 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" 
volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.488198 4751 reconstruct.go:97] "Volume reconstruction finished" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.488213 4751 reconciler.go:26] "Reconciler: start to sync state" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.489313 4751 manager.go:324] Recovery completed Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.503429 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.505613 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.505661 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.505672 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.508568 4751 cpu_manager.go:225] "Starting CPU manager" policy="none" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.508735 4751 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.508776 4751 state_mem.go:36] "Initialized new in-memory state store" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.515702 4751 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.519300 4751 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.519348 4751 status_manager.go:217] "Starting to sync pod status with apiserver" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.519378 4751 kubelet.go:2335] "Starting kubelet main sync loop" Feb 27 16:23:58 crc kubenswrapper[4751]: E0227 16:23:58.519524 4751 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Feb 27 16:23:58 crc kubenswrapper[4751]: W0227 16:23:58.520143 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.64:6443: connect: connection refused Feb 27 16:23:58 crc kubenswrapper[4751]: E0227 16:23:58.520218 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.64:6443: connect: connection refused" logger="UnhandledError" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.521162 4751 policy_none.go:49] "None policy: Start" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.522117 4751 memory_manager.go:170] "Starting memorymanager" policy="None" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.522167 4751 state_mem.go:35] "Initializing new in-memory state store" Feb 27 16:23:58 crc kubenswrapper[4751]: E0227 16:23:58.555236 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.583710 4751 manager.go:334] "Starting Device Plugin manager" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.583774 4751 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.583788 4751 server.go:79] "Starting device plugin registration server" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.584235 4751 eviction_manager.go:189] "Eviction manager: starting control loop" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.584252 4751 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.584730 4751 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.584832 4751 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.584845 4751 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Feb 27 16:23:58 crc kubenswrapper[4751]: E0227 16:23:58.591252 4751 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.620554 4751 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc"] Feb 27 16:23:58 crc kubenswrapper[4751]: 
I0227 16:23:58.620624 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.621537 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.621579 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.621591 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.621729 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.622127 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.622213 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.623351 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.623444 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.623468 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.623634 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.623821 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.623885 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.623993 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.624018 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.624028 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.624708 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.624745 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.624758 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.624881 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.624954 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.624981 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.624993 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.625058 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.625134 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.625620 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.625652 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.625665 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.625833 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.625977 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.626015 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.626148 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.626180 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.626189 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.626470 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.626494 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.626503 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.626629 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.626684 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.626816 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.626875 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.626899 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.627224 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.627247 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.627258 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:23:58 crc kubenswrapper[4751]: E0227 16:23:58.663367 4751 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.64:6443: connect: connection refused" interval="400ms" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.685117 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.686705 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.686750 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.686771 4751 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.686807 4751 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 27 16:23:58 crc kubenswrapper[4751]: E0227 16:23:58.687467 4751 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.64:6443: connect: connection refused" node="crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.690918 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.690961 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.691339 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.691366 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.691393 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.691958 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.692026 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.692100 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" 
(UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.692153 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.692220 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.692324 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.692436 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.692484 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.692549 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.692596 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794466 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794551 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794571 4751 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794592 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794610 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794652 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794670 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794688 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794708 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794725 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794748 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794756 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794798 4751 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794768 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794817 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794861 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794887 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794849 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794919 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794876 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794948 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794907 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") 
" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794986 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794891 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.795012 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794838 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.795043 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794828 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.794897 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.795137 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.888283 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.890205 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.890250 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.890260 4751 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasSufficientPID" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.890286 4751 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 27 16:23:58 crc kubenswrapper[4751]: E0227 16:23:58.890828 4751 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.64:6443: connect: connection refused" node="crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.979640 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 27 16:23:58 crc kubenswrapper[4751]: I0227 16:23:58.994380 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 27 16:23:59 crc kubenswrapper[4751]: I0227 16:23:59.018957 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 27 16:23:59 crc kubenswrapper[4751]: I0227 16:23:59.027442 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Feb 27 16:23:59 crc kubenswrapper[4751]: I0227 16:23:59.031560 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:23:59 crc kubenswrapper[4751]: E0227 16:23:59.064788 4751 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.64:6443: connect: connection refused" interval="800ms" Feb 27 16:23:59 crc kubenswrapper[4751]: W0227 16:23:59.091647 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-577bfe6f8558505790ccdd5928e53474b79efc11bad9e64f67274caac2ba5633 WatchSource:0}: Error finding container 577bfe6f8558505790ccdd5928e53474b79efc11bad9e64f67274caac2ba5633: Status 404 returned error can't find the container with id 577bfe6f8558505790ccdd5928e53474b79efc11bad9e64f67274caac2ba5633 Feb 27 16:23:59 crc kubenswrapper[4751]: W0227 16:23:59.095598 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-3f473a1506a9ee8024b9199975f3910b4c0adb44fa5e60160145dadaa4f0d2a3 WatchSource:0}: Error finding container 3f473a1506a9ee8024b9199975f3910b4c0adb44fa5e60160145dadaa4f0d2a3: Status 404 returned error can't find the container with id 3f473a1506a9ee8024b9199975f3910b4c0adb44fa5e60160145dadaa4f0d2a3 Feb 27 16:23:59 crc kubenswrapper[4751]: W0227 16:23:59.099762 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-cd1ae7517a85e09ad9a597e59719a0a6e0bc6716e9320e8844d14379e9ad9c85 WatchSource:0}: Error finding container cd1ae7517a85e09ad9a597e59719a0a6e0bc6716e9320e8844d14379e9ad9c85: Status 404 returned error can't find the container with id cd1ae7517a85e09ad9a597e59719a0a6e0bc6716e9320e8844d14379e9ad9c85 Feb 27 16:23:59 crc kubenswrapper[4751]: W0227 16:23:59.106384 4751 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-3535cae7df5e336511d2bedd9963b5dcd10f59ac8ad04a6f9302e084d5540548 WatchSource:0}: Error finding container 3535cae7df5e336511d2bedd9963b5dcd10f59ac8ad04a6f9302e084d5540548: Status 404 returned error can't find the container with id 3535cae7df5e336511d2bedd9963b5dcd10f59ac8ad04a6f9302e084d5540548 Feb 27 16:23:59 crc kubenswrapper[4751]: W0227 16:23:59.107655 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-58d3cb998f6b9236a7b131ae1e7351c0c9f232a8fe908c67d74df38c95ca27a2 WatchSource:0}: Error finding container 58d3cb998f6b9236a7b131ae1e7351c0c9f232a8fe908c67d74df38c95ca27a2: Status 404 returned error can't find the container with id 58d3cb998f6b9236a7b131ae1e7351c0c9f232a8fe908c67d74df38c95ca27a2 Feb 27 16:23:59 crc kubenswrapper[4751]: W0227 16:23:59.278500 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.64:6443: connect: connection refused Feb 27 16:23:59 crc kubenswrapper[4751]: E0227 16:23:59.278676 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.64:6443: connect: connection refused" logger="UnhandledError" Feb 27 16:23:59 crc kubenswrapper[4751]: I0227 16:23:59.290982 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:23:59 crc kubenswrapper[4751]: W0227 16:23:59.292873 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.64:6443: connect: connection refused Feb 27 16:23:59 crc kubenswrapper[4751]: E0227 16:23:59.292980 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.64:6443: connect: connection refused" logger="UnhandledError" Feb 27 16:23:59 crc kubenswrapper[4751]: I0227 16:23:59.293206 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:23:59 crc kubenswrapper[4751]: I0227 16:23:59.293267 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:23:59 crc kubenswrapper[4751]: I0227 16:23:59.293285 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:23:59 crc kubenswrapper[4751]: I0227 16:23:59.293321 4751 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 27 16:23:59 crc kubenswrapper[4751]: E0227 16:23:59.293855 4751 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.64:6443: connect: connection refused" node="crc" Feb 27 16:23:59 crc kubenswrapper[4751]: I0227 
16:23:59.452630 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.64:6443: connect: connection refused Feb 27 16:23:59 crc kubenswrapper[4751]: I0227 16:23:59.524821 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"58d3cb998f6b9236a7b131ae1e7351c0c9f232a8fe908c67d74df38c95ca27a2"} Feb 27 16:23:59 crc kubenswrapper[4751]: I0227 16:23:59.526079 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"3535cae7df5e336511d2bedd9963b5dcd10f59ac8ad04a6f9302e084d5540548"} Feb 27 16:23:59 crc kubenswrapper[4751]: I0227 16:23:59.527185 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"cd1ae7517a85e09ad9a597e59719a0a6e0bc6716e9320e8844d14379e9ad9c85"} Feb 27 16:23:59 crc kubenswrapper[4751]: I0227 16:23:59.528656 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"3f473a1506a9ee8024b9199975f3910b4c0adb44fa5e60160145dadaa4f0d2a3"} Feb 27 16:23:59 crc kubenswrapper[4751]: I0227 16:23:59.529864 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"577bfe6f8558505790ccdd5928e53474b79efc11bad9e64f67274caac2ba5633"} Feb 27 16:23:59 crc kubenswrapper[4751]: W0227 16:23:59.800120 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.64:6443: connect: connection refused Feb 27 16:23:59 crc kubenswrapper[4751]: E0227 16:23:59.800224 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.64:6443: connect: connection refused" logger="UnhandledError" Feb 27 16:23:59 crc kubenswrapper[4751]: E0227 16:23:59.865576 4751 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.64:6443: connect: connection refused" interval="1.6s" Feb 27 16:23:59 crc kubenswrapper[4751]: W0227 16:23:59.937176 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.64:6443: connect: connection refused Feb 27 16:23:59 crc kubenswrapper[4751]: E0227 16:23:59.937705 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial 
tcp 38.102.83.64:6443: connect: connection refused" logger="UnhandledError" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.094567 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.097915 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.097977 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.098000 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.098042 4751 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 27 16:24:00 crc kubenswrapper[4751]: E0227 16:24:00.098829 4751 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.64:6443: connect: connection refused" node="crc" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.384542 4751 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Feb 27 16:24:00 crc kubenswrapper[4751]: E0227 16:24:00.385460 4751 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.64:6443: connect: connection refused" logger="UnhandledError" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.454115 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.64:6443: connect: connection refused Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.534794 4751 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="275d6054505cb9a29221cf933ffc2b436856047e0a74a8068dfad90cb1006a57" exitCode=0 Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.534848 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"275d6054505cb9a29221cf933ffc2b436856047e0a74a8068dfad90cb1006a57"} Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.534939 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.536240 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.536269 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.536280 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.540649 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f2444e31d4c252ddbff520f5604104b24d3d356ad1c13579d3c22e3e12136de0"} Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.540701 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"c4017f08d0b8cb9d514bc164c51e34a262f9cf825e254befdeb07348060d12fd"} Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.540726 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"48bd9202bf8ec4c7579698f2e002923313c105d9109b5053f95ce32ae76c2821"} Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.543967 4751 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd" exitCode=0 Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.544081 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd"} Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.544093 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.545960 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.545983 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.545992 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.548837 4751 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="325608014a4301b4d07b01cde948ca31550694b56cf5433c6a59f4334699ddf7" exitCode=0 Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.548898 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.548888 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"325608014a4301b4d07b01cde948ca31550694b56cf5433c6a59f4334699ddf7"} Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.550091 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.550147 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.550159 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.552991 4751 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd" exitCode=0 
Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.553046 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd"} Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.553186 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.554733 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.554758 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.554769 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.557211 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.558418 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.558445 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:00 crc kubenswrapper[4751]: I0227 16:24:00.558473 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:01 crc kubenswrapper[4751]: W0227 16:24:01.104263 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.64:6443: connect: connection refused Feb 27 16:24:01 crc kubenswrapper[4751]: E0227 16:24:01.104332 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.64:6443: connect: connection refused" logger="UnhandledError" Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.453201 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.64:6443: connect: connection refused Feb 27 16:24:01 crc kubenswrapper[4751]: E0227 16:24:01.466774 4751 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.64:6443: connect: connection refused" interval="3.2s" Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.561844 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f"} Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.561888 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560"} Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.561897 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb"} Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.561905 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129"} Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.564896 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"6392f00602ed84ba23d03f7e082004ef85e622f340c36812094984606cfed32c"} Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.564925 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"dc4cc06a7fbfc76c4cc44e86e3a81212db634cf9727ae28b857150dc89f104e2"} Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.564938 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"4372e903bbffcc7bae3515e3443a8c3b00a773bb51373ec2725cbae4bc309bbe"} Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.565029 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.565753 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.565777 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.565789 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.568144 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"36a6674fde1a5972f2881ddc34b464ea203d05e1e48901c4547ea4aa99085faa"} Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.568226 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.568909 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.568926 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.568933 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.571052 4751 generic.go:334] "Generic (PLEG): 
container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd" exitCode=0 Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.571101 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd"} Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.571184 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.571805 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.571824 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.571831 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.575577 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"b2ea2055e866712461d10a343f95abebd9e5433f73257f6d09a2faf188b7e0d4"} Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.575614 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.576293 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.576326 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.576337 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.699867 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.701099 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.701130 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.701475 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.701508 4751 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 27 16:24:01 crc kubenswrapper[4751]: E0227 16:24:01.703627 4751 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.64:6443: connect: connection refused" node="crc" Feb 27 16:24:01 crc kubenswrapper[4751]: I0227 16:24:01.769127 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 27 16:24:01 crc kubenswrapper[4751]: W0227 16:24:01.882251 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to 
list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.64:6443: connect: connection refused Feb 27 16:24:01 crc kubenswrapper[4751]: E0227 16:24:01.882376 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.64:6443: connect: connection refused" logger="UnhandledError" Feb 27 16:24:02 crc kubenswrapper[4751]: I0227 16:24:02.582155 4751 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096" exitCode=0 Feb 27 16:24:02 crc kubenswrapper[4751]: I0227 16:24:02.582321 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096"} Feb 27 16:24:02 crc kubenswrapper[4751]: I0227 16:24:02.582376 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:02 crc kubenswrapper[4751]: I0227 16:24:02.585098 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:02 crc kubenswrapper[4751]: I0227 16:24:02.585208 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:02 crc kubenswrapper[4751]: I0227 16:24:02.585230 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:02 crc kubenswrapper[4751]: I0227 16:24:02.589189 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:02 crc kubenswrapper[4751]: I0227 16:24:02.589739 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f57e79f3ba63c4adac2c054c523090037664215b6b6a730cb5a513b12efc50a1"} Feb 27 16:24:02 crc kubenswrapper[4751]: I0227 16:24:02.590101 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:02 crc kubenswrapper[4751]: I0227 16:24:02.591674 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:02 crc kubenswrapper[4751]: I0227 16:24:02.592143 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:02 crc kubenswrapper[4751]: I0227 16:24:02.593208 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:02 crc kubenswrapper[4751]: I0227 16:24:02.593251 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:02 crc kubenswrapper[4751]: I0227 16:24:02.593271 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:02 crc kubenswrapper[4751]: I0227 16:24:02.593273 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:02 crc kubenswrapper[4751]: I0227 16:24:02.593213 4751 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:02 crc kubenswrapper[4751]: I0227 16:24:02.593359 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:02 crc kubenswrapper[4751]: I0227 16:24:02.593391 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:02 crc kubenswrapper[4751]: I0227 16:24:02.593308 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:02 crc kubenswrapper[4751]: I0227 16:24:02.593520 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:02 crc kubenswrapper[4751]: I0227 16:24:02.593991 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:02 crc kubenswrapper[4751]: I0227 16:24:02.594151 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:02 crc kubenswrapper[4751]: I0227 16:24:02.594322 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:03 crc kubenswrapper[4751]: I0227 16:24:03.595298 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ef6090b6b35f21dfbe6d596d98ace0147a831021d5e879149b2c60e338e46ee0"} Feb 27 16:24:03 crc kubenswrapper[4751]: I0227 16:24:03.595346 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"80337bb8baab11860ebf1376e814c50ee29ed4753d9cf943363813eed97ebdb4"} Feb 27 16:24:03 crc kubenswrapper[4751]: I0227 16:24:03.595362 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"d16b1942494e44a2797d349c20548a3644b2a3b147c57563a8382a62de218b03"} Feb 27 16:24:03 crc kubenswrapper[4751]: I0227 16:24:03.595430 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:03 crc kubenswrapper[4751]: I0227 16:24:03.595453 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:24:03 crc kubenswrapper[4751]: I0227 16:24:03.595483 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:03 crc kubenswrapper[4751]: I0227 16:24:03.596535 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:03 crc kubenswrapper[4751]: I0227 16:24:03.596577 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:03 crc kubenswrapper[4751]: I0227 16:24:03.596592 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:03 crc kubenswrapper[4751]: I0227 16:24:03.596653 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:03 crc kubenswrapper[4751]: I0227 16:24:03.596671 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 27 16:24:03 crc kubenswrapper[4751]: I0227 16:24:03.596687 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:04 crc kubenswrapper[4751]: I0227 16:24:04.510113 4751 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Feb 27 16:24:04 crc kubenswrapper[4751]: I0227 16:24:04.603363 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"1c07b1cdbe0a524d5194b02185a86fcff64ddd83a916c9e0dc0fa630e123f054"} Feb 27 16:24:04 crc kubenswrapper[4751]: I0227 16:24:04.603434 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"5c53c723a2249cddf16f9817dc34608f5742c9b762d9a66be0e93e0b843d47f6"} Feb 27 16:24:04 crc kubenswrapper[4751]: I0227 16:24:04.603470 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:04 crc kubenswrapper[4751]: I0227 16:24:04.603540 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:04 crc kubenswrapper[4751]: I0227 16:24:04.604288 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:04 crc kubenswrapper[4751]: I0227 16:24:04.604311 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:04 crc kubenswrapper[4751]: I0227 16:24:04.604349 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:04 crc kubenswrapper[4751]: I0227 16:24:04.604999 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:04 crc kubenswrapper[4751]: I0227 16:24:04.605057 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:04 crc kubenswrapper[4751]: I0227 16:24:04.605080 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:04 crc kubenswrapper[4751]: I0227 16:24:04.904603 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:04 crc kubenswrapper[4751]: I0227 16:24:04.904776 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:24:04 crc kubenswrapper[4751]: I0227 16:24:04.907265 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:04 crc kubenswrapper[4751]: I0227 16:24:04.907332 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:04 crc kubenswrapper[4751]: I0227 16:24:04.907351 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:04 crc kubenswrapper[4751]: I0227 16:24:04.907392 4751 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 27 16:24:05 crc kubenswrapper[4751]: I0227 16:24:05.605691 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:05 crc kubenswrapper[4751]: I0227 16:24:05.605798 
4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:05 crc kubenswrapper[4751]: I0227 16:24:05.606583 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:05 crc kubenswrapper[4751]: I0227 16:24:05.606638 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:05 crc kubenswrapper[4751]: I0227 16:24:05.606658 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:05 crc kubenswrapper[4751]: I0227 16:24:05.606847 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:05 crc kubenswrapper[4751]: I0227 16:24:05.606894 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:05 crc kubenswrapper[4751]: I0227 16:24:05.606912 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:07 crc kubenswrapper[4751]: I0227 16:24:07.468551 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:24:07 crc kubenswrapper[4751]: I0227 16:24:07.468813 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:07 crc kubenswrapper[4751]: I0227 16:24:07.470488 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:07 crc kubenswrapper[4751]: I0227 16:24:07.470550 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:07 crc kubenswrapper[4751]: I0227 16:24:07.470576 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:07 crc kubenswrapper[4751]: I0227 16:24:07.717493 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Feb 27 16:24:07 crc kubenswrapper[4751]: I0227 16:24:07.717670 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:07 crc kubenswrapper[4751]: I0227 16:24:07.718902 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:07 crc kubenswrapper[4751]: I0227 16:24:07.718966 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:07 crc kubenswrapper[4751]: I0227 16:24:07.718983 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:07 crc kubenswrapper[4751]: I0227 16:24:07.996780 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 27 16:24:07 crc kubenswrapper[4751]: I0227 16:24:07.997011 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:07 crc kubenswrapper[4751]: I0227 16:24:07.998671 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:07 crc kubenswrapper[4751]: I0227 16:24:07.998724 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 27 16:24:07 crc kubenswrapper[4751]: I0227 16:24:07.998742 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:08 crc kubenswrapper[4751]: I0227 16:24:08.331368 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 27 16:24:08 crc kubenswrapper[4751]: E0227 16:24:08.591437 4751 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Feb 27 16:24:08 crc kubenswrapper[4751]: I0227 16:24:08.613036 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:08 crc kubenswrapper[4751]: I0227 16:24:08.614072 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:08 crc kubenswrapper[4751]: I0227 16:24:08.614136 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:08 crc kubenswrapper[4751]: I0227 16:24:08.614155 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:09 crc kubenswrapper[4751]: I0227 16:24:09.255943 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Feb 27 16:24:09 crc kubenswrapper[4751]: I0227 16:24:09.256150 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:09 crc kubenswrapper[4751]: I0227 16:24:09.257949 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:09 crc kubenswrapper[4751]: I0227 16:24:09.258015 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:09 crc kubenswrapper[4751]: I0227 16:24:09.258034 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:10 crc kubenswrapper[4751]: I0227 16:24:10.254213 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 27 16:24:10 crc kubenswrapper[4751]: I0227 16:24:10.254469 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:10 crc kubenswrapper[4751]: I0227 16:24:10.256456 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:10 crc kubenswrapper[4751]: I0227 16:24:10.256508 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:10 crc kubenswrapper[4751]: I0227 16:24:10.256522 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:10 crc kubenswrapper[4751]: I0227 16:24:10.262855 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 27 16:24:10 crc kubenswrapper[4751]: I0227 16:24:10.619009 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:10 crc kubenswrapper[4751]: I0227 16:24:10.619207 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 27 16:24:10 crc kubenswrapper[4751]: I0227 16:24:10.620478 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:10 crc kubenswrapper[4751]: I0227 16:24:10.620538 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:10 crc kubenswrapper[4751]: I0227 16:24:10.620556 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:10 crc kubenswrapper[4751]: I0227 16:24:10.625310 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 27 16:24:11 crc kubenswrapper[4751]: I0227 16:24:11.332241 4751 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Feb 27 16:24:11 crc kubenswrapper[4751]: I0227 16:24:11.332373 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 27 16:24:11 crc kubenswrapper[4751]: I0227 16:24:11.622755 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:11 crc kubenswrapper[4751]: I0227 16:24:11.624529 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:11 crc kubenswrapper[4751]: I0227 16:24:11.624599 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:11 crc kubenswrapper[4751]: I0227 16:24:11.624623 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:11 crc kubenswrapper[4751]: I0227 16:24:11.835045 4751 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Feb 27 16:24:11 crc kubenswrapper[4751]: I0227 16:24:11.835126 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Feb 27 16:24:12 crc kubenswrapper[4751]: W0227 16:24:12.258138 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout Feb 27 16:24:12 crc kubenswrapper[4751]: I0227 16:24:12.258283 4751 trace.go:236] Trace[665852879]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (27-Feb-2026 16:24:02.256) (total 
time: 10001ms): Feb 27 16:24:12 crc kubenswrapper[4751]: Trace[665852879]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (16:24:12.258) Feb 27 16:24:12 crc kubenswrapper[4751]: Trace[665852879]: [10.001821043s] [10.001821043s] END Feb 27 16:24:12 crc kubenswrapper[4751]: E0227 16:24:12.258322 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Feb 27 16:24:12 crc kubenswrapper[4751]: I0227 16:24:12.453682 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Feb 27 16:24:12 crc kubenswrapper[4751]: W0227 16:24:12.523012 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout Feb 27 16:24:12 crc kubenswrapper[4751]: I0227 16:24:12.523170 4751 trace.go:236] Trace[1047253298]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (27-Feb-2026 16:24:02.521) (total time: 10001ms): Feb 27 16:24:12 crc kubenswrapper[4751]: Trace[1047253298]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (16:24:12.522) Feb 27 16:24:12 crc kubenswrapper[4751]: Trace[1047253298]: [10.001648556s] [10.001648556s] END Feb 27 16:24:12 crc kubenswrapper[4751]: E0227 16:24:12.523225 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Feb 27 16:24:12 crc kubenswrapper[4751]: I0227 16:24:12.582453 4751 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Feb 27 16:24:12 crc kubenswrapper[4751]: I0227 16:24:12.582525 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Feb 27 16:24:12 crc kubenswrapper[4751]: E0227 16:24:12.621951 4751 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:12Z is after 2026-02-23T05:33:13Z" event="&Event{ObjectMeta:{crc.189827183e2d5382 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.446146434 +0000 UTC m=+0.593160881,LastTimestamp:2026-02-27 16:23:58.446146434 +0000 UTC m=+0.593160881,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:12 crc kubenswrapper[4751]: I0227 16:24:12.624964 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:12 crc kubenswrapper[4751]: I0227 16:24:12.626187 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:12 crc kubenswrapper[4751]: I0227 16:24:12.626231 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:12 crc kubenswrapper[4751]: I0227 16:24:12.626247 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:12 crc kubenswrapper[4751]: E0227 16:24:12.629792 4751 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:12Z is after 2026-02-23T05:33:13Z" interval="6.4s" Feb 27 16:24:12 crc kubenswrapper[4751]: W0227 16:24:12.632738 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:12Z is after 2026-02-23T05:33:13Z Feb 27 16:24:12 crc kubenswrapper[4751]: E0227 16:24:12.632982 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:12Z is after 2026-02-23T05:33:13Z" logger="UnhandledError" Feb 27 16:24:12 crc kubenswrapper[4751]: E0227 16:24:12.634765 4751 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:12Z is after 2026-02-23T05:33:13Z" node="crc" Feb 27 16:24:12 crc kubenswrapper[4751]: I0227 16:24:12.635315 4751 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Feb 27 16:24:12 crc kubenswrapper[4751]: I0227 16:24:12.635424 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" 
containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Feb 27 16:24:12 crc kubenswrapper[4751]: W0227 16:24:12.639264 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:12Z is after 2026-02-23T05:33:13Z Feb 27 16:24:12 crc kubenswrapper[4751]: E0227 16:24:12.639375 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:12Z is after 2026-02-23T05:33:13Z" logger="UnhandledError" Feb 27 16:24:12 crc kubenswrapper[4751]: E0227 16:24:12.642915 4751 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:12Z is after 2026-02-23T05:33:13Z" logger="UnhandledError" Feb 27 16:24:12 crc kubenswrapper[4751]: I0227 16:24:12.646242 4751 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Feb 27 16:24:12 crc kubenswrapper[4751]: I0227 16:24:12.646699 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Feb 27 16:24:13 crc kubenswrapper[4751]: I0227 16:24:13.458367 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:13Z is after 2026-02-23T05:33:13Z Feb 27 16:24:13 crc kubenswrapper[4751]: I0227 16:24:13.629588 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Feb 27 16:24:13 crc kubenswrapper[4751]: I0227 16:24:13.631648 4751 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="f57e79f3ba63c4adac2c054c523090037664215b6b6a730cb5a513b12efc50a1" exitCode=255 Feb 27 16:24:13 crc kubenswrapper[4751]: I0227 16:24:13.631748 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"f57e79f3ba63c4adac2c054c523090037664215b6b6a730cb5a513b12efc50a1"} Feb 27 
16:24:13 crc kubenswrapper[4751]: I0227 16:24:13.632077 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:13 crc kubenswrapper[4751]: I0227 16:24:13.632918 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:13 crc kubenswrapper[4751]: I0227 16:24:13.633012 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:13 crc kubenswrapper[4751]: I0227 16:24:13.633085 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:13 crc kubenswrapper[4751]: I0227 16:24:13.633675 4751 scope.go:117] "RemoveContainer" containerID="f57e79f3ba63c4adac2c054c523090037664215b6b6a730cb5a513b12efc50a1" Feb 27 16:24:14 crc kubenswrapper[4751]: I0227 16:24:14.455946 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:14Z is after 2026-02-23T05:33:13Z Feb 27 16:24:14 crc kubenswrapper[4751]: I0227 16:24:14.638321 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Feb 27 16:24:14 crc kubenswrapper[4751]: I0227 16:24:14.638949 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Feb 27 16:24:14 crc kubenswrapper[4751]: I0227 16:24:14.641844 4751 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="edc27e8f7cfeb6da896d488d21bee14332117a0646e2f2934d51ca4c44df06b7" exitCode=255 Feb 27 16:24:14 crc kubenswrapper[4751]: I0227 16:24:14.641921 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"edc27e8f7cfeb6da896d488d21bee14332117a0646e2f2934d51ca4c44df06b7"} Feb 27 16:24:14 crc kubenswrapper[4751]: I0227 16:24:14.641996 4751 scope.go:117] "RemoveContainer" containerID="f57e79f3ba63c4adac2c054c523090037664215b6b6a730cb5a513b12efc50a1" Feb 27 16:24:14 crc kubenswrapper[4751]: I0227 16:24:14.642222 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:14 crc kubenswrapper[4751]: I0227 16:24:14.643584 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:14 crc kubenswrapper[4751]: I0227 16:24:14.643629 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:14 crc kubenswrapper[4751]: I0227 16:24:14.643644 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:14 crc kubenswrapper[4751]: I0227 16:24:14.644695 4751 scope.go:117] "RemoveContainer" containerID="edc27e8f7cfeb6da896d488d21bee14332117a0646e2f2934d51ca4c44df06b7" Feb 27 16:24:14 crc kubenswrapper[4751]: E0227 16:24:14.645104 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: 
\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Feb 27 16:24:14 crc kubenswrapper[4751]: I0227 16:24:14.912448 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:24:15 crc kubenswrapper[4751]: I0227 16:24:15.456617 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:15Z is after 2026-02-23T05:33:13Z Feb 27 16:24:15 crc kubenswrapper[4751]: I0227 16:24:15.647395 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Feb 27 16:24:15 crc kubenswrapper[4751]: I0227 16:24:15.650000 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:15 crc kubenswrapper[4751]: I0227 16:24:15.652231 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:15 crc kubenswrapper[4751]: I0227 16:24:15.652280 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:15 crc kubenswrapper[4751]: I0227 16:24:15.652291 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:15 crc kubenswrapper[4751]: I0227 16:24:15.653060 4751 scope.go:117] "RemoveContainer" containerID="edc27e8f7cfeb6da896d488d21bee14332117a0646e2f2934d51ca4c44df06b7" Feb 27 16:24:15 crc kubenswrapper[4751]: E0227 16:24:15.653279 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Feb 27 16:24:15 crc kubenswrapper[4751]: I0227 16:24:15.657143 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:24:16 crc kubenswrapper[4751]: W0227 16:24:16.027937 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:16Z is after 2026-02-23T05:33:13Z Feb 27 16:24:16 crc kubenswrapper[4751]: E0227 16:24:16.028052 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:16Z is after 2026-02-23T05:33:13Z" logger="UnhandledError" Feb 27 16:24:16 crc kubenswrapper[4751]: I0227 
16:24:16.459126 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:16Z is after 2026-02-23T05:33:13Z Feb 27 16:24:16 crc kubenswrapper[4751]: I0227 16:24:16.653316 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:16 crc kubenswrapper[4751]: I0227 16:24:16.654610 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:16 crc kubenswrapper[4751]: I0227 16:24:16.654673 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:16 crc kubenswrapper[4751]: I0227 16:24:16.654698 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:16 crc kubenswrapper[4751]: I0227 16:24:16.655652 4751 scope.go:117] "RemoveContainer" containerID="edc27e8f7cfeb6da896d488d21bee14332117a0646e2f2934d51ca4c44df06b7" Feb 27 16:24:16 crc kubenswrapper[4751]: E0227 16:24:16.655944 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Feb 27 16:24:17 crc kubenswrapper[4751]: I0227 16:24:17.457236 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:17Z is after 2026-02-23T05:33:13Z Feb 27 16:24:17 crc kubenswrapper[4751]: I0227 16:24:17.756086 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Feb 27 16:24:17 crc kubenswrapper[4751]: I0227 16:24:17.756913 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:17 crc kubenswrapper[4751]: I0227 16:24:17.758257 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:17 crc kubenswrapper[4751]: I0227 16:24:17.758298 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:17 crc kubenswrapper[4751]: I0227 16:24:17.758315 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:17 crc kubenswrapper[4751]: I0227 16:24:17.774005 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Feb 27 16:24:17 crc kubenswrapper[4751]: W0227 16:24:17.873331 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:17Z is after 2026-02-23T05:33:13Z Feb 27 16:24:17 crc 
kubenswrapper[4751]: E0227 16:24:17.873471 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:17Z is after 2026-02-23T05:33:13Z" logger="UnhandledError" Feb 27 16:24:18 crc kubenswrapper[4751]: I0227 16:24:18.458223 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:18Z is after 2026-02-23T05:33:13Z Feb 27 16:24:18 crc kubenswrapper[4751]: E0227 16:24:18.591598 4751 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Feb 27 16:24:18 crc kubenswrapper[4751]: I0227 16:24:18.659307 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:18 crc kubenswrapper[4751]: I0227 16:24:18.661281 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:18 crc kubenswrapper[4751]: I0227 16:24:18.661332 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:18 crc kubenswrapper[4751]: I0227 16:24:18.661346 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:19 crc kubenswrapper[4751]: I0227 16:24:19.034922 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:19 crc kubenswrapper[4751]: E0227 16:24:19.034922 4751 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:19Z is after 2026-02-23T05:33:13Z" interval="7s" Feb 27 16:24:19 crc kubenswrapper[4751]: I0227 16:24:19.037482 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:19 crc kubenswrapper[4751]: I0227 16:24:19.037551 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:19 crc kubenswrapper[4751]: I0227 16:24:19.037569 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:19 crc kubenswrapper[4751]: I0227 16:24:19.037604 4751 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 27 16:24:19 crc kubenswrapper[4751]: E0227 16:24:19.042664 4751 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:19Z is after 2026-02-23T05:33:13Z" node="crc" Feb 27 16:24:19 crc kubenswrapper[4751]: I0227 16:24:19.459844 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get 
"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:19Z is after 2026-02-23T05:33:13Z Feb 27 16:24:19 crc kubenswrapper[4751]: W0227 16:24:19.644732 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:19Z is after 2026-02-23T05:33:13Z Feb 27 16:24:19 crc kubenswrapper[4751]: E0227 16:24:19.644862 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:19Z is after 2026-02-23T05:33:13Z" logger="UnhandledError" Feb 27 16:24:20 crc kubenswrapper[4751]: I0227 16:24:20.458196 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:20Z is after 2026-02-23T05:33:13Z Feb 27 16:24:20 crc kubenswrapper[4751]: W0227 16:24:20.640427 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:20Z is after 2026-02-23T05:33:13Z Feb 27 16:24:20 crc kubenswrapper[4751]: E0227 16:24:20.640827 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:20Z is after 2026-02-23T05:33:13Z" logger="UnhandledError" Feb 27 16:24:21 crc kubenswrapper[4751]: I0227 16:24:21.331980 4751 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Feb 27 16:24:21 crc kubenswrapper[4751]: I0227 16:24:21.332065 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Feb 27 16:24:21 crc kubenswrapper[4751]: I0227 16:24:21.383446 4751 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Feb 27 16:24:21 crc kubenswrapper[4751]: E0227 16:24:21.389359 4751 
certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:21Z is after 2026-02-23T05:33:13Z" logger="UnhandledError" Feb 27 16:24:21 crc kubenswrapper[4751]: I0227 16:24:21.458226 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:21Z is after 2026-02-23T05:33:13Z Feb 27 16:24:21 crc kubenswrapper[4751]: I0227 16:24:21.834585 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:24:21 crc kubenswrapper[4751]: I0227 16:24:21.834792 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:21 crc kubenswrapper[4751]: I0227 16:24:21.835968 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:21 crc kubenswrapper[4751]: I0227 16:24:21.836000 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:21 crc kubenswrapper[4751]: I0227 16:24:21.836008 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:21 crc kubenswrapper[4751]: I0227 16:24:21.836490 4751 scope.go:117] "RemoveContainer" containerID="edc27e8f7cfeb6da896d488d21bee14332117a0646e2f2934d51ca4c44df06b7" Feb 27 16:24:21 crc kubenswrapper[4751]: E0227 16:24:21.836641 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Feb 27 16:24:22 crc kubenswrapper[4751]: I0227 16:24:22.457862 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:22Z is after 2026-02-23T05:33:13Z Feb 27 16:24:22 crc kubenswrapper[4751]: I0227 16:24:22.582786 4751 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:24:22 crc kubenswrapper[4751]: E0227 16:24:22.627849 4751 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:22Z is after 2026-02-23T05:33:13Z" event="&Event{ObjectMeta:{crc.189827183e2d5382 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.446146434 +0000 UTC m=+0.593160881,LastTimestamp:2026-02-27 16:23:58.446146434 +0000 UTC m=+0.593160881,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:22 crc kubenswrapper[4751]: I0227 16:24:22.674329 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:22 crc kubenswrapper[4751]: I0227 16:24:22.678787 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:22 crc kubenswrapper[4751]: I0227 16:24:22.678866 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:22 crc kubenswrapper[4751]: I0227 16:24:22.678891 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:22 crc kubenswrapper[4751]: I0227 16:24:22.679970 4751 scope.go:117] "RemoveContainer" containerID="edc27e8f7cfeb6da896d488d21bee14332117a0646e2f2934d51ca4c44df06b7" Feb 27 16:24:22 crc kubenswrapper[4751]: E0227 16:24:22.680280 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Feb 27 16:24:23 crc kubenswrapper[4751]: I0227 16:24:23.458162 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:23Z is after 2026-02-23T05:33:13Z Feb 27 16:24:24 crc kubenswrapper[4751]: I0227 16:24:24.457775 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:24Z is after 2026-02-23T05:33:13Z Feb 27 16:24:25 crc kubenswrapper[4751]: I0227 16:24:25.456206 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:25Z is after 2026-02-23T05:33:13Z Feb 27 16:24:26 crc kubenswrapper[4751]: E0227 16:24:26.041787 4751 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:26Z is after 2026-02-23T05:33:13Z" interval="7s" Feb 27 16:24:26 crc kubenswrapper[4751]: I0227 16:24:26.043011 4751 kubelet_node_status.go:401] "Setting node annotation to 
enable volume controller attach/detach" Feb 27 16:24:26 crc kubenswrapper[4751]: I0227 16:24:26.045121 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:26 crc kubenswrapper[4751]: I0227 16:24:26.045210 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:26 crc kubenswrapper[4751]: I0227 16:24:26.045235 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:26 crc kubenswrapper[4751]: I0227 16:24:26.045288 4751 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 27 16:24:26 crc kubenswrapper[4751]: E0227 16:24:26.048516 4751 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:26Z is after 2026-02-23T05:33:13Z" node="crc" Feb 27 16:24:26 crc kubenswrapper[4751]: I0227 16:24:26.457995 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:26Z is after 2026-02-23T05:33:13Z Feb 27 16:24:27 crc kubenswrapper[4751]: I0227 16:24:27.458431 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:27Z is after 2026-02-23T05:33:13Z Feb 27 16:24:28 crc kubenswrapper[4751]: I0227 16:24:28.457570 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:28Z is after 2026-02-23T05:33:13Z Feb 27 16:24:28 crc kubenswrapper[4751]: W0227 16:24:28.538236 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:28Z is after 2026-02-23T05:33:13Z Feb 27 16:24:28 crc kubenswrapper[4751]: E0227 16:24:28.538324 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:28Z is after 2026-02-23T05:33:13Z" logger="UnhandledError" Feb 27 16:24:28 crc kubenswrapper[4751]: E0227 16:24:28.591748 4751 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Feb 27 16:24:29 crc kubenswrapper[4751]: I0227 16:24:29.457559 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get 
"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:24:29Z is after 2026-02-23T05:33:13Z Feb 27 16:24:30 crc kubenswrapper[4751]: I0227 16:24:30.460049 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:30 crc kubenswrapper[4751]: W0227 16:24:30.531684 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: services is forbidden: User "system:anonymous" cannot list resource "services" in API group "" at the cluster scope Feb 27 16:24:30 crc kubenswrapper[4751]: E0227 16:24:30.531751 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User \"system:anonymous\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" Feb 27 16:24:31 crc kubenswrapper[4751]: I0227 16:24:31.273230 4751 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": read tcp 192.168.126.11:54556->192.168.126.11:10357: read: connection reset by peer" start-of-body= Feb 27 16:24:31 crc kubenswrapper[4751]: I0227 16:24:31.273685 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": read tcp 192.168.126.11:54556->192.168.126.11:10357: read: connection reset by peer" Feb 27 16:24:31 crc kubenswrapper[4751]: I0227 16:24:31.274507 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 27 16:24:31 crc kubenswrapper[4751]: I0227 16:24:31.274753 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:31 crc kubenswrapper[4751]: I0227 16:24:31.276550 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:31 crc kubenswrapper[4751]: I0227 16:24:31.276616 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:31 crc kubenswrapper[4751]: I0227 16:24:31.276637 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:31 crc kubenswrapper[4751]: I0227 16:24:31.277306 4751 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="cluster-policy-controller" containerStatusID={"Type":"cri-o","ID":"c4017f08d0b8cb9d514bc164c51e34a262f9cf825e254befdeb07348060d12fd"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container cluster-policy-controller failed startup probe, will be restarted" Feb 27 16:24:31 crc kubenswrapper[4751]: I0227 16:24:31.277591 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" containerID="cri-o://c4017f08d0b8cb9d514bc164c51e34a262f9cf825e254befdeb07348060d12fd" gracePeriod=30 Feb 27 16:24:31 crc kubenswrapper[4751]: I0227 16:24:31.459185 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:31 crc kubenswrapper[4751]: I0227 16:24:31.703070 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/cluster-policy-controller/0.log" Feb 27 16:24:31 crc kubenswrapper[4751]: I0227 16:24:31.703784 4751 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="c4017f08d0b8cb9d514bc164c51e34a262f9cf825e254befdeb07348060d12fd" exitCode=255 Feb 27 16:24:31 crc kubenswrapper[4751]: I0227 16:24:31.703834 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"c4017f08d0b8cb9d514bc164c51e34a262f9cf825e254befdeb07348060d12fd"} Feb 27 16:24:32 crc kubenswrapper[4751]: I0227 16:24:32.460858 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.636359 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189827183e2d5382 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.446146434 +0000 UTC m=+0.593160881,LastTimestamp:2026-02-27 16:23:58.446146434 +0000 UTC m=+0.593160881,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.644002 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.1898271841b9378d default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.505645965 +0000 UTC m=+0.652660412,LastTimestamp:2026-02-27 16:23:58.505645965 +0000 UTC m=+0.652660412,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.650893 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" 
cannot create resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.1898271841b98d10 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.505667856 +0000 UTC m=+0.652682303,LastTimestamp:2026-02-27 16:23:58.505667856 +0000 UTC m=+0.652682303,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.656246 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.1898271841b9b556 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.505678166 +0000 UTC m=+0.652692613,LastTimestamp:2026-02-27 16:23:58.505678166 +0000 UTC m=+0.652692613,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.660255 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189827184687e6cb default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeAllocatableEnforced,Message:Updated Node Allocatable limit across pods,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.586300107 +0000 UTC m=+0.733314564,LastTimestamp:2026-02-27 16:23:58.586300107 +0000 UTC m=+0.733314564,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.667952 4751 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.1898271841b9378d\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.1898271841b9378d default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.505645965 +0000 UTC m=+0.652660412,LastTimestamp:2026-02-27 16:23:58.621565767 +0000 UTC m=+0.768580214,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.674843 4751 event.go:359] "Server rejected event (will not retry!)" err="events 
\"crc.1898271841b98d10\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.1898271841b98d10 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.505667856 +0000 UTC m=+0.652682303,LastTimestamp:2026-02-27 16:23:58.621587568 +0000 UTC m=+0.768602015,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.686389 4751 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.1898271841b9b556\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.1898271841b9b556 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.505678166 +0000 UTC m=+0.652692613,LastTimestamp:2026-02-27 16:23:58.621597468 +0000 UTC m=+0.768611915,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.695363 4751 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.1898271841b9378d\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.1898271841b9378d default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.505645965 +0000 UTC m=+0.652660412,LastTimestamp:2026-02-27 16:23:58.623382576 +0000 UTC m=+0.770397053,Count:3,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.703575 4751 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.1898271841b98d10\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.1898271841b98d10 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.505667856 +0000 UTC m=+0.652682303,LastTimestamp:2026-02-27 16:23:58.623459489 +0000 UTC m=+0.770473976,Count:3,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc 
kubenswrapper[4751]: I0227 16:24:32.712944 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/cluster-policy-controller/0.log" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.713086 4751 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.1898271841b9b556\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.1898271841b9b556 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.505678166 +0000 UTC m=+0.652692613,LastTimestamp:2026-02-27 16:23:58.623479129 +0000 UTC m=+0.770493616,Count:3,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: I0227 16:24:32.714286 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"4cbc47ef2524239d9c1679189bfa96296620af4fcfe02507695d700d6455eda4"} Feb 27 16:24:32 crc kubenswrapper[4751]: I0227 16:24:32.714489 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:32 crc kubenswrapper[4751]: I0227 16:24:32.716007 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:32 crc kubenswrapper[4751]: I0227 16:24:32.716269 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:32 crc kubenswrapper[4751]: I0227 16:24:32.716549 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.718700 4751 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.1898271841b9378d\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.1898271841b9378d default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.505645965 +0000 UTC m=+0.652660412,LastTimestamp:2026-02-27 16:23:58.624013197 +0000 UTC m=+0.771027644,Count:4,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.725213 4751 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.1898271841b98d10\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.1898271841b98d10 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.505667856 +0000 UTC m=+0.652682303,LastTimestamp:2026-02-27 16:23:58.624024917 +0000 UTC m=+0.771039364,Count:4,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.731049 4751 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.1898271841b9b556\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.1898271841b9b556 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.505678166 +0000 UTC m=+0.652692613,LastTimestamp:2026-02-27 16:23:58.624033647 +0000 UTC m=+0.771048094,Count:4,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.736738 4751 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.1898271841b9378d\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.1898271841b9378d default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.505645965 +0000 UTC m=+0.652660412,LastTimestamp:2026-02-27 16:23:58.62473042 +0000 UTC m=+0.771744877,Count:5,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.743920 4751 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.1898271841b98d10\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.1898271841b98d10 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.505667856 +0000 UTC m=+0.652682303,LastTimestamp:2026-02-27 16:23:58.624752301 +0000 UTC m=+0.771766768,Count:5,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.745237 4751 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.1898271841b9b556\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace 
\"default\"" event="&Event{ObjectMeta:{crc.1898271841b9b556 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.505678166 +0000 UTC m=+0.652692613,LastTimestamp:2026-02-27 16:23:58.624764701 +0000 UTC m=+0.771779168,Count:5,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.750016 4751 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.1898271841b9378d\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.1898271841b9378d default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.505645965 +0000 UTC m=+0.652660412,LastTimestamp:2026-02-27 16:23:58.624975878 +0000 UTC m=+0.771990325,Count:6,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.756885 4751 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.1898271841b98d10\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.1898271841b98d10 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.505667856 +0000 UTC m=+0.652682303,LastTimestamp:2026-02-27 16:23:58.624988249 +0000 UTC m=+0.772002696,Count:6,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.763741 4751 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.1898271841b9b556\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.1898271841b9b556 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.505678166 +0000 UTC m=+0.652692613,LastTimestamp:2026-02-27 16:23:58.624999319 +0000 UTC m=+0.772013766,Count:6,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.770187 4751 event.go:359] "Server rejected event (will not retry!)" err="events 
\"crc.1898271841b9378d\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.1898271841b9378d default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.505645965 +0000 UTC m=+0.652660412,LastTimestamp:2026-02-27 16:23:58.62564091 +0000 UTC m=+0.772655377,Count:7,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.774907 4751 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.1898271841b98d10\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.1898271841b98d10 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.505667856 +0000 UTC m=+0.652682303,LastTimestamp:2026-02-27 16:23:58.625660691 +0000 UTC m=+0.772675148,Count:7,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.778748 4751 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.1898271841b9b556\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.1898271841b9b556 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.505678166 +0000 UTC m=+0.652692613,LastTimestamp:2026-02-27 16:23:58.625673651 +0000 UTC m=+0.772688118,Count:7,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.783858 4751 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.1898271841b9378d\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.1898271841b9378d default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.505645965 +0000 UTC m=+0.652660412,LastTimestamp:2026-02-27 16:23:58.626167827 +0000 UTC m=+0.773182274,Count:8,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc 
kubenswrapper[4751]: E0227 16:24:32.788339 4751 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.1898271841b98d10\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.1898271841b98d10 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:58.505667856 +0000 UTC m=+0.652682303,LastTimestamp:2026-02-27 16:23:58.626186258 +0000 UTC m=+0.773200705,Count:8,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.795107 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.18982718653ee50b openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:59.101609227 +0000 UTC m=+1.248623684,LastTimestamp:2026-02-27 16:23:59.101609227 +0000 UTC m=+1.248623684,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.801544 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.18982718654c5438 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:3dcd261975c3d6b9a6ad6367fd4facd3,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{wait-for-host-port},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:59.102489656 +0000 UTC m=+1.249504143,LastTimestamp:2026-02-27 16:23:59.102489656 +0000 UTC m=+1.249504143,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.806062 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" 
event="&Event{ObjectMeta:{kube-apiserver-crc.1898271865549312 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:59.103030034 +0000 UTC m=+1.250044491,LastTimestamp:2026-02-27 16:23:59.103030034 +0000 UTC m=+1.250044491,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.812272 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.1898271865b18c32 openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:d1b160f5dda77d281dd8e69ec8d817f9,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:59.109123122 +0000 UTC m=+1.256137579,LastTimestamp:2026-02-27 16:23:59.109123122 +0000 UTC m=+1.256137579,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.818182 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189827186749fed4 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:59.135891156 +0000 UTC m=+1.282905613,LastTimestamp:2026-02-27 16:23:59.135891156 +0000 UTC m=+1.282905613,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.822287 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.189827188fa9e3eb openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 
UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager},},Reason:Created,Message:Created container kube-controller-manager,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:59.813264363 +0000 UTC m=+1.960278820,LastTimestamp:2026-02-27 16:23:59.813264363 +0000 UTC m=+1.960278820,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.826511 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.1898271890106dc3 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:3dcd261975c3d6b9a6ad6367fd4facd3,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{wait-for-host-port},},Reason:Created,Message:Created container wait-for-host-port,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:59.819984323 +0000 UTC m=+1.966998780,LastTimestamp:2026-02-27 16:23:59.819984323 +0000 UTC m=+1.966998780,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.830022 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.18982718905fe321 openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:d1b160f5dda77d281dd8e69ec8d817f9,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Created,Message:Created container setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:59.825191713 +0000 UTC m=+1.972206170,LastTimestamp:2026-02-27 16:23:59.825191713 +0000 UTC m=+1.972206170,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.835337 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.18982718907cd11c openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager},},Reason:Started,Message:Started container 
kube-controller-manager,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:59.827087644 +0000 UTC m=+1.974102101,LastTimestamp:2026-02-27 16:23:59.827087644 +0000 UTC m=+1.974102101,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.840140 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.189827189094e358 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:59.828665176 +0000 UTC m=+1.975679633,LastTimestamp:2026-02-27 16:23:59.828665176 +0000 UTC m=+1.975679633,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.847217 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.18982718912786f7 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:3dcd261975c3d6b9a6ad6367fd4facd3,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{wait-for-host-port},},Reason:Started,Message:Started container wait-for-host-port,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:59.838275319 +0000 UTC m=+1.985289776,LastTimestamp:2026-02-27 16:23:59.838275319 +0000 UTC m=+1.985289776,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.853121 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.1898271891719b56 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Created,Message:Created container setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:59.843130198 +0000 UTC m=+1.990144655,LastTimestamp:2026-02-27 16:23:59.843130198 +0000 UTC m=+1.990144655,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 
UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.855063 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.1898271891af03f7 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Created,Message:Created container setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:59.847154679 +0000 UTC m=+1.994169136,LastTimestamp:2026-02-27 16:23:59.847154679 +0000 UTC m=+1.994169136,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.861854 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.1898271891afa1dc openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:d1b160f5dda77d281dd8e69ec8d817f9,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Started,Message:Started container setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:59.8471951 +0000 UTC m=+1.994209597,LastTimestamp:2026-02-27 16:23:59.8471951 +0000 UTC m=+1.994209597,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.868492 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.18982718924ea0fc openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Started,Message:Started container setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:59.8576151 +0000 UTC m=+2.004629587,LastTimestamp:2026-02-27 16:23:59.8576151 +0000 UTC m=+2.004629587,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.875048 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.1898271892a156d7 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Started,Message:Started container setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:59.863035607 +0000 UTC m=+2.010050094,LastTimestamp:2026-02-27 16:23:59.863035607 +0000 UTC m=+2.010050094,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.882059 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.18982718a403f6f4 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Created,Message:Created container cluster-policy-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.154711796 +0000 UTC m=+2.301726253,LastTimestamp:2026-02-27 16:24:00.154711796 +0000 UTC m=+2.301726253,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.889009 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.18982718a51753fe openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Started,Message:Started container cluster-policy-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.172758014 +0000 UTC m=+2.319772501,LastTimestamp:2026-02-27 16:24:00.172758014 +0000 UTC m=+2.319772501,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.896430 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.18982718a52e4137 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-cert-syncer},},Reason:Pulled,Message:Container image 
\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.174260535 +0000 UTC m=+2.321274992,LastTimestamp:2026-02-27 16:24:00.174260535 +0000 UTC m=+2.321274992,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.903220 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.18982718b1153178 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-cert-syncer},},Reason:Created,Message:Created container kube-controller-manager-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.373944696 +0000 UTC m=+2.520959133,LastTimestamp:2026-02-27 16:24:00.373944696 +0000 UTC m=+2.520959133,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.910370 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.18982718b1bd414e openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-cert-syncer},},Reason:Started,Message:Started container kube-controller-manager-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.384958798 +0000 UTC m=+2.531973245,LastTimestamp:2026-02-27 16:24:00.384958798 +0000 UTC m=+2.531973245,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.916673 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.18982718b1ce7eec openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-recovery-controller},},Reason:Pulled,Message:Container image 
\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.386088684 +0000 UTC m=+2.533103131,LastTimestamp:2026-02-27 16:24:00.386088684 +0000 UTC m=+2.533103131,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.923142 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.18982718bad76df8 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:3dcd261975c3d6b9a6ad6367fd4facd3,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.537669112 +0000 UTC m=+2.684683559,LastTimestamp:2026-02-27 16:24:00.537669112 +0000 UTC m=+2.684683559,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.928682 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.18982718bb6c6cd7 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-ensure-env-vars},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.547433687 +0000 UTC m=+2.694448134,LastTimestamp:2026-02-27 16:24:00.547433687 +0000 UTC m=+2.694448134,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.941012 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.18982718bbac0db2 openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:d1b160f5dda77d281dd8e69ec8d817f9,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-rbac-proxy-crio},},Reason:Pulled,Message:Container image 
\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.551603634 +0000 UTC m=+2.698618121,LastTimestamp:2026-02-27 16:24:00.551603634 +0000 UTC m=+2.698618121,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.946271 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.18982718bbff3181 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.557052289 +0000 UTC m=+2.704066746,LastTimestamp:2026-02-27 16:24:00.557052289 +0000 UTC m=+2.704066746,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.952884 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.18982718c021d607 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-recovery-controller},},Reason:Created,Message:Created container kube-controller-manager-recovery-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.626431495 +0000 UTC m=+2.773445952,LastTimestamp:2026-02-27 16:24:00.626431495 +0000 UTC m=+2.773445952,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.957137 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.18982718c158c720 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-recovery-controller},},Reason:Started,Message:Started container 
kube-controller-manager-recovery-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.646809376 +0000 UTC m=+2.793823843,LastTimestamp:2026-02-27 16:24:00.646809376 +0000 UTC m=+2.793823843,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.962235 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.18982718c7fd4b06 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:3dcd261975c3d6b9a6ad6367fd4facd3,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler},},Reason:Created,Message:Created container kube-scheduler,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.758254342 +0000 UTC m=+2.905268789,LastTimestamp:2026-02-27 16:24:00.758254342 +0000 UTC m=+2.905268789,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.966567 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.18982718c89ab4b0 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:3dcd261975c3d6b9a6ad6367fd4facd3,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler},},Reason:Started,Message:Started container kube-scheduler,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.768570544 +0000 UTC m=+2.915584991,LastTimestamp:2026-02-27 16:24:00.768570544 +0000 UTC m=+2.915584991,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.971067 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.18982718c8aa022a openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:3dcd261975c3d6b9a6ad6367fd4facd3,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-cert-syncer},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.769573418 +0000 UTC m=+2.916587865,LastTimestamp:2026-02-27 16:24:00.769573418 +0000 UTC m=+2.916587865,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 
UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.978276 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.18982718cb16db1d openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:Created,Message:Created container kube-apiserver,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.810261277 +0000 UTC m=+2.957275734,LastTimestamp:2026-02-27 16:24:00.810261277 +0000 UTC m=+2.957275734,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.985236 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.18982718cb4c29dd openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-ensure-env-vars},},Reason:Created,Message:Created container etcd-ensure-env-vars,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.813754845 +0000 UTC m=+2.960769292,LastTimestamp:2026-02-27 16:24:00.813754845 +0000 UTC m=+2.960769292,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.991524 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.18982718cb4ca616 openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:d1b160f5dda77d281dd8e69ec8d817f9,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-rbac-proxy-crio},},Reason:Created,Message:Created container kube-rbac-proxy-crio,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.813786646 +0000 UTC m=+2.960801103,LastTimestamp:2026-02-27 16:24:00.813786646 +0000 UTC m=+2.960801103,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:32 crc kubenswrapper[4751]: E0227 16:24:32.998183 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.18982718cc09c7d3 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] 
[] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:Started,Message:Started container kube-apiserver,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.826181587 +0000 UTC m=+2.973196034,LastTimestamp:2026-02-27 16:24:00.826181587 +0000 UTC m=+2.973196034,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.004520 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.18982718cc189752 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-syncer},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.82715221 +0000 UTC m=+2.974166657,LastTimestamp:2026-02-27 16:24:00.82715221 +0000 UTC m=+2.974166657,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.009958 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.18982718cce99eaf openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:d1b160f5dda77d281dd8e69ec8d817f9,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-rbac-proxy-crio},},Reason:Started,Message:Started container kube-rbac-proxy-crio,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.840851119 +0000 UTC m=+2.987865566,LastTimestamp:2026-02-27 16:24:00.840851119 +0000 UTC m=+2.987865566,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.016297 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.18982718cd316513 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-ensure-env-vars},},Reason:Started,Message:Started container 
etcd-ensure-env-vars,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.845554963 +0000 UTC m=+2.992569420,LastTimestamp:2026-02-27 16:24:00.845554963 +0000 UTC m=+2.992569420,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.020873 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.18982718d285451d openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:3dcd261975c3d6b9a6ad6367fd4facd3,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-cert-syncer},},Reason:Created,Message:Created container kube-scheduler-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.934937885 +0000 UTC m=+3.081952332,LastTimestamp:2026-02-27 16:24:00.934937885 +0000 UTC m=+3.081952332,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.029768 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.18982718d3e3635f openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:3dcd261975c3d6b9a6ad6367fd4facd3,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-cert-syncer},},Reason:Started,Message:Started container kube-scheduler-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.957883231 +0000 UTC m=+3.104897678,LastTimestamp:2026-02-27 16:24:00.957883231 +0000 UTC m=+3.104897678,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.036507 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.18982718d3f365c0 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:3dcd261975c3d6b9a6ad6367fd4facd3,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-recovery-controller},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.958932416 +0000 UTC m=+3.105946853,LastTimestamp:2026-02-27 16:24:00.958932416 +0000 UTC m=+3.105946853,Count:1,Type:Normal,EventTime:0001-01-01 
00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.040838 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.18982718d7bf6615 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-syncer},},Reason:Created,Message:Created container kube-apiserver-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:01.022633493 +0000 UTC m=+3.169647950,LastTimestamp:2026-02-27 16:24:01.022633493 +0000 UTC m=+3.169647950,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.046334 4751 controller.go:145] "Failed to ensure lease exists, will retry" err="leases.coordination.k8s.io \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.046714 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.18982718d8c7428e openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-syncer},},Reason:Started,Message:Started container kube-apiserver-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:01.039925902 +0000 UTC m=+3.186940359,LastTimestamp:2026-02-27 16:24:01.039925902 +0000 UTC m=+3.186940359,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: I0227 16:24:33.049999 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.051256 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.18982718d8d5c6bd openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-regeneration-controller},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on 
machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:01.040877245 +0000 UTC m=+3.187891702,LastTimestamp:2026-02-27 16:24:01.040877245 +0000 UTC m=+3.187891702,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: I0227 16:24:33.051768 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:33 crc kubenswrapper[4751]: I0227 16:24:33.051806 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:33 crc kubenswrapper[4751]: I0227 16:24:33.051827 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:33 crc kubenswrapper[4751]: I0227 16:24:33.051866 4751 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.056338 4751 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes is forbidden: User \"system:anonymous\" cannot create resource \"nodes\" in API group \"\" at the cluster scope" node="crc" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.056480 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.18982718e0303d53 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:3dcd261975c3d6b9a6ad6367fd4facd3,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-recovery-controller},},Reason:Created,Message:Created container kube-scheduler-recovery-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:01.164246355 +0000 UTC m=+3.311260802,LastTimestamp:2026-02-27 16:24:01.164246355 +0000 UTC m=+3.311260802,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.057749 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.18982718e10d0624 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:3dcd261975c3d6b9a6ad6367fd4facd3,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-recovery-controller},},Reason:Started,Message:Started container kube-scheduler-recovery-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:01.178715684 +0000 UTC m=+3.325730131,LastTimestamp:2026-02-27 16:24:01.178715684 +0000 UTC m=+3.325730131,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.063458 4751 event.go:359] "Server rejected event (will not retry!)" err="events is 
forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.18982718e3897621 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-regeneration-controller},},Reason:Created,Message:Created container kube-apiserver-cert-regeneration-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:01.220425249 +0000 UTC m=+3.367439696,LastTimestamp:2026-02-27 16:24:01.220425249 +0000 UTC m=+3.367439696,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.067857 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.18982718e47ff4cb openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-regeneration-controller},},Reason:Started,Message:Started container kube-apiserver-cert-regeneration-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:01.236579531 +0000 UTC m=+3.383593978,LastTimestamp:2026-02-27 16:24:01.236579531 +0000 UTC m=+3.383593978,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.071663 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.18982718e493d4b7 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-insecure-readyz},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:01.237882039 +0000 UTC m=+3.384896486,LastTimestamp:2026-02-27 16:24:01.237882039 +0000 UTC m=+3.384896486,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.075803 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.18982718f13c6b0d openshift-kube-apiserver 0 
0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-insecure-readyz},},Reason:Created,Message:Created container kube-apiserver-insecure-readyz,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:01.450257165 +0000 UTC m=+3.597271612,LastTimestamp:2026-02-27 16:24:01.450257165 +0000 UTC m=+3.597271612,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.081798 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.18982718f1dd40d2 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-insecure-readyz},},Reason:Started,Message:Started container kube-apiserver-insecure-readyz,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:01.46079765 +0000 UTC m=+3.607812117,LastTimestamp:2026-02-27 16:24:01.46079765 +0000 UTC m=+3.607812117,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.087676 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.18982718f1ee8b2c openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:01.461930796 +0000 UTC m=+3.608945243,LastTimestamp:2026-02-27 16:24:01.461930796 +0000 UTC m=+3.608945243,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.094510 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.18982718f894c583 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-resources-copy},},Reason:Pulled,Message:Container image 
\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:01.573488003 +0000 UTC m=+3.720502450,LastTimestamp:2026-02-27 16:24:01.573488003 +0000 UTC m=+3.720502450,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.103677 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.18982718fcf2d7b9 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Created,Message:Created container kube-apiserver-check-endpoints,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:01.646761913 +0000 UTC m=+3.793776360,LastTimestamp:2026-02-27 16:24:01.646761913 +0000 UTC m=+3.793776360,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.108147 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.18982718fe4db974 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Started,Message:Started container kube-apiserver-check-endpoints,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:01.669495156 +0000 UTC m=+3.816509603,LastTimestamp:2026-02-27 16:24:01.669495156 +0000 UTC m=+3.816509603,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.113482 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.1898271902b60f3d openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-resources-copy},},Reason:Created,Message:Created container etcd-resources-copy,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:01.743441725 +0000 UTC m=+3.890456172,LastTimestamp:2026-02-27 16:24:01.743441725 +0000 UTC m=+3.890456172,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 
27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.117571 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.1898271903740368 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-resources-copy},},Reason:Started,Message:Started container etcd-resources-copy,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:01.755890536 +0000 UTC m=+3.902904983,LastTimestamp:2026-02-27 16:24:01.755890536 +0000 UTC m=+3.902904983,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.121879 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.18982719351883de openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcdctl},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:02.58875491 +0000 UTC m=+4.735769397,LastTimestamp:2026-02-27 16:24:02.58875491 +0000 UTC m=+4.735769397,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.129373 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.1898271940f6da2f openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcdctl},},Reason:Created,Message:Created container etcdctl,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:02.787875375 +0000 UTC m=+4.934889852,LastTimestamp:2026-02-27 16:24:02.787875375 +0000 UTC m=+4.934889852,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.154328 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.1898271941a579c5 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcdctl},},Reason:Started,Message:Started container etcdctl,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:02.799319493 +0000 UTC m=+4.946333950,LastTimestamp:2026-02-27 16:24:02.799319493 +0000 UTC m=+4.946333950,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.164036 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.1898271941bc3a41 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:02.800810561 +0000 UTC m=+4.947825008,LastTimestamp:2026-02-27 16:24:02.800810561 +0000 UTC m=+4.947825008,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.167214 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189827195032f24a openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd},},Reason:Created,Message:Created container etcd,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:03.043471946 +0000 UTC m=+5.190486413,LastTimestamp:2026-02-27 16:24:03.043471946 +0000 UTC m=+5.190486413,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.170526 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189827195137e688 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd},},Reason:Started,Message:Started container etcd,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:03.060573832 +0000 UTC m=+5.207588309,LastTimestamp:2026-02-27 16:24:03.060573832 +0000 UTC m=+5.207588309,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 
16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.174160 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189827195152cd53 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-metrics},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:03.062336851 +0000 UTC m=+5.209351308,LastTimestamp:2026-02-27 16:24:03.062336851 +0000 UTC m=+5.209351308,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.178204 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.1898271960a0a57a openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-metrics},},Reason:Created,Message:Created container etcd-metrics,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:03.319096698 +0000 UTC m=+5.466111155,LastTimestamp:2026-02-27 16:24:03.319096698 +0000 UTC m=+5.466111155,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.182327 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.1898271961b0e13c openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-metrics},},Reason:Started,Message:Started container etcd-metrics,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:03.336937788 +0000 UTC m=+5.483952245,LastTimestamp:2026-02-27 16:24:03.336937788 +0000 UTC m=+5.483952245,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.186531 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.1898271961c19f1d openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-readyz},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:03.338034973 +0000 UTC m=+5.485049460,LastTimestamp:2026-02-27 16:24:03.338034973 +0000 UTC m=+5.485049460,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.191546 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.18982719725286d9 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-readyz},},Reason:Created,Message:Created container etcd-readyz,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:03.615966937 +0000 UTC m=+5.762981394,LastTimestamp:2026-02-27 16:24:03.615966937 +0000 UTC m=+5.762981394,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.197749 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.1898271973854e3e openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-readyz},},Reason:Started,Message:Started container etcd-readyz,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:03.636071998 +0000 UTC m=+5.783086455,LastTimestamp:2026-02-27 16:24:03.636071998 +0000 UTC m=+5.783086455,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.202646 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.18982719739811c8 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-rev},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:03.637301704 +0000 UTC m=+5.784316181,LastTimestamp:2026-02-27 16:24:03.637301704 +0000 
UTC m=+5.784316181,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.206961 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.1898271980fbfd9d openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-rev},},Reason:Created,Message:Created container etcd-rev,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:03.861953949 +0000 UTC m=+6.008968406,LastTimestamp:2026-02-27 16:24:03.861953949 +0000 UTC m=+6.008968406,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.210960 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.1898271981f8d7e7 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-rev},},Reason:Started,Message:Started container etcd-rev,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:03.878524903 +0000 UTC m=+6.025539380,LastTimestamp:2026-02-27 16:24:03.878524903 +0000 UTC m=+6.025539380,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.218886 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event=< Feb 27 16:24:33 crc kubenswrapper[4751]: &Event{ObjectMeta:{kube-controller-manager-crc.1898271b3e40f9ce openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:ProbeError,Message:Startup probe error: Get "https://192.168.126.11:10357/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Feb 27 16:24:33 crc kubenswrapper[4751]: body: Feb 27 16:24:33 crc kubenswrapper[4751]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:11.332336078 +0000 UTC m=+13.479350565,LastTimestamp:2026-02-27 16:24:11.332336078 +0000 UTC m=+13.479350565,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Feb 27 16:24:33 crc kubenswrapper[4751]: > Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.222593 4751 event.go:359] "Server rejected event (will not retry!)" 
err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.1898271b3e4371f8 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Unhealthy,Message:Startup probe failed: Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:11.332497912 +0000 UTC m=+13.479512389,LastTimestamp:2026-02-27 16:24:11.332497912 +0000 UTC m=+13.479512389,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.226560 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event=< Feb 27 16:24:33 crc kubenswrapper[4751]: &Event{ObjectMeta:{kube-apiserver-crc.1898271b5c389d0f openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:ProbeError,Message:Readiness probe error: Get "https://192.168.126.11:17697/healthz": dial tcp 192.168.126.11:17697: connect: connection refused Feb 27 16:24:33 crc kubenswrapper[4751]: body: Feb 27 16:24:33 crc kubenswrapper[4751]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:11.835104527 +0000 UTC m=+13.982119004,LastTimestamp:2026-02-27 16:24:11.835104527 +0000 UTC m=+13.982119004,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Feb 27 16:24:33 crc kubenswrapper[4751]: > Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.231471 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.1898271b5c39756e openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Unhealthy,Message:Readiness probe failed: Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:11.835159918 +0000 UTC m=+13.982174395,LastTimestamp:2026-02-27 16:24:11.835159918 +0000 UTC m=+13.982174395,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 
16:24:33.235823 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event=< Feb 27 16:24:33 crc kubenswrapper[4751]: &Event{ObjectMeta:{kube-apiserver-crc.1898271b88c4f7a7 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:ProbeError,Message:Liveness probe error: Get "https://192.168.126.11:17697/healthz": dial tcp 192.168.126.11:17697: connect: connection refused Feb 27 16:24:33 crc kubenswrapper[4751]: body: Feb 27 16:24:33 crc kubenswrapper[4751]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:12.582500263 +0000 UTC m=+14.729514740,LastTimestamp:2026-02-27 16:24:12.582500263 +0000 UTC m=+14.729514740,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Feb 27 16:24:33 crc kubenswrapper[4751]: > Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.241355 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.1898271b88c5d934 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Unhealthy,Message:Liveness probe failed: Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:12.582558004 +0000 UTC m=+14.729572491,LastTimestamp:2026-02-27 16:24:12.582558004 +0000 UTC m=+14.729572491,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.244723 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event=< Feb 27 16:24:33 crc kubenswrapper[4751]: &Event{ObjectMeta:{kube-apiserver-crc.1898271b8bebc69a openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:ProbeError,Message:Startup probe error: HTTP probe failed with statuscode: 403 Feb 27 16:24:33 crc kubenswrapper[4751]: body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Feb 27 16:24:33 crc kubenswrapper[4751]: Feb 27 16:24:33 crc kubenswrapper[4751]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:12.635375258 
+0000 UTC m=+14.782389745,LastTimestamp:2026-02-27 16:24:12.635375258 +0000 UTC m=+14.782389745,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Feb 27 16:24:33 crc kubenswrapper[4751]: > Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.250226 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.1898271b8bed4b99 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:Unhealthy,Message:Startup probe failed: HTTP probe failed with statuscode: 403,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:12.635474841 +0000 UTC m=+14.782489318,LastTimestamp:2026-02-27 16:24:12.635474841 +0000 UTC m=+14.782489318,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.253903 4751 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.1898271b8bebc69a\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event=< Feb 27 16:24:33 crc kubenswrapper[4751]: &Event{ObjectMeta:{kube-apiserver-crc.1898271b8bebc69a openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:f4b27818a5e8e43d0dc095d08835c792,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:ProbeError,Message:Startup probe error: HTTP probe failed with statuscode: 403 Feb 27 16:24:33 crc kubenswrapper[4751]: body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Feb 27 16:24:33 crc kubenswrapper[4751]: Feb 27 16:24:33 crc kubenswrapper[4751]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:12.635375258 +0000 UTC m=+14.782389745,LastTimestamp:2026-02-27 16:24:12.646669626 +0000 UTC m=+14.793684113,Count:2,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Feb 27 16:24:33 crc kubenswrapper[4751]: > Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.260705 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event=< Feb 27 16:24:33 crc kubenswrapper[4751]: &Event{ObjectMeta:{kube-controller-manager-crc.1898271d924878a6 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:ProbeError,Message:Startup probe error: Get "https://192.168.126.11:10357/healthz": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers) Feb 27 16:24:33 crc kubenswrapper[4751]: body: Feb 27 16:24:33 crc kubenswrapper[4751]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:21.332048038 +0000 UTC m=+23.479062485,LastTimestamp:2026-02-27 16:24:21.332048038 +0000 UTC m=+23.479062485,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Feb 27 16:24:33 crc kubenswrapper[4751]: > Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.265327 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.1898271d92492469 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Unhealthy,Message:Startup probe failed: Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:21.332092009 +0000 UTC m=+23.479106456,LastTimestamp:2026-02-27 16:24:21.332092009 +0000 UTC m=+23.479106456,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.272598 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event=< Feb 27 16:24:33 crc kubenswrapper[4751]: &Event{ObjectMeta:{kube-controller-manager-crc.1898271fe2d9207e openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:ProbeError,Message:Startup probe error: Get "https://192.168.126.11:10357/healthz": read tcp 192.168.126.11:54556->192.168.126.11:10357: read: connection reset by peer Feb 27 16:24:33 crc kubenswrapper[4751]: body: Feb 27 16:24:33 crc kubenswrapper[4751]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:31.273640062 +0000 UTC m=+33.420654569,LastTimestamp:2026-02-27 16:24:31.273640062 +0000 UTC m=+33.420654569,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Feb 27 16:24:33 crc kubenswrapper[4751]: > Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.278732 4751 
event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.1898271fe2e51abf openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Unhealthy,Message:Startup probe failed: Get \"https://192.168.126.11:10357/healthz\": read tcp 192.168.126.11:54556->192.168.126.11:10357: read: connection reset by peer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:31.274425023 +0000 UTC m=+33.421439510,LastTimestamp:2026-02-27 16:24:31.274425023 +0000 UTC m=+33.421439510,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.284902 4751 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.1898271fe314f8f9 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Killing,Message:Container cluster-policy-controller failed startup probe, will be restarted,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:31.277562105 +0000 UTC m=+33.424576612,LastTimestamp:2026-02-27 16:24:31.277562105 +0000 UTC m=+33.424576612,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.291686 4751 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-controller-manager-crc.189827189094e358\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.189827189094e358 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:23:59.828665176 +0000 UTC m=+1.975679633,LastTimestamp:2026-02-27 16:24:31.799170612 +0000 UTC m=+33.946185069,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.297082 
4751 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-controller-manager-crc.18982718a403f6f4\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.18982718a403f6f4 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Created,Message:Created container cluster-policy-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.154711796 +0000 UTC m=+2.301726253,LastTimestamp:2026-02-27 16:24:32.08175455 +0000 UTC m=+34.228769007,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: E0227 16:24:33.303230 4751 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-controller-manager-crc.18982718a51753fe\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.18982718a51753fe openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Started,Message:Started container cluster-policy-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:24:00.172758014 +0000 UTC m=+2.319772501,LastTimestamp:2026-02-27 16:24:32.09879883 +0000 UTC m=+34.245813287,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:24:33 crc kubenswrapper[4751]: I0227 16:24:33.459749 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:33 crc kubenswrapper[4751]: I0227 16:24:33.519793 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:33 crc kubenswrapper[4751]: I0227 16:24:33.521388 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:33 crc kubenswrapper[4751]: I0227 16:24:33.521491 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:33 crc kubenswrapper[4751]: I0227 16:24:33.521517 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:33 crc kubenswrapper[4751]: I0227 16:24:33.522320 4751 scope.go:117] "RemoveContainer" containerID="edc27e8f7cfeb6da896d488d21bee14332117a0646e2f2934d51ca4c44df06b7" Feb 27 16:24:33 crc kubenswrapper[4751]: I0227 16:24:33.716467 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:33 crc 
kubenswrapper[4751]: I0227 16:24:33.717161 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:33 crc kubenswrapper[4751]: I0227 16:24:33.717185 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:33 crc kubenswrapper[4751]: I0227 16:24:33.717196 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:34 crc kubenswrapper[4751]: I0227 16:24:34.460392 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:34 crc kubenswrapper[4751]: I0227 16:24:34.723245 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Feb 27 16:24:34 crc kubenswrapper[4751]: I0227 16:24:34.726109 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"93a9178aabebb94c6e66d434dd60b66bf62cf145ea122583b88fdc47a361545d"} Feb 27 16:24:34 crc kubenswrapper[4751]: I0227 16:24:34.726333 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:34 crc kubenswrapper[4751]: I0227 16:24:34.727661 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:34 crc kubenswrapper[4751]: I0227 16:24:34.727707 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:34 crc kubenswrapper[4751]: I0227 16:24:34.727722 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:35 crc kubenswrapper[4751]: I0227 16:24:35.458058 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:35 crc kubenswrapper[4751]: I0227 16:24:35.732304 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/2.log" Feb 27 16:24:35 crc kubenswrapper[4751]: I0227 16:24:35.733151 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Feb 27 16:24:35 crc kubenswrapper[4751]: I0227 16:24:35.735357 4751 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="93a9178aabebb94c6e66d434dd60b66bf62cf145ea122583b88fdc47a361545d" exitCode=255 Feb 27 16:24:35 crc kubenswrapper[4751]: I0227 16:24:35.735447 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"93a9178aabebb94c6e66d434dd60b66bf62cf145ea122583b88fdc47a361545d"} Feb 27 16:24:35 crc kubenswrapper[4751]: I0227 16:24:35.735507 4751 scope.go:117] "RemoveContainer" 
containerID="edc27e8f7cfeb6da896d488d21bee14332117a0646e2f2934d51ca4c44df06b7" Feb 27 16:24:35 crc kubenswrapper[4751]: I0227 16:24:35.735713 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:35 crc kubenswrapper[4751]: I0227 16:24:35.737372 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:35 crc kubenswrapper[4751]: I0227 16:24:35.737438 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:35 crc kubenswrapper[4751]: I0227 16:24:35.737453 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:35 crc kubenswrapper[4751]: I0227 16:24:35.738272 4751 scope.go:117] "RemoveContainer" containerID="93a9178aabebb94c6e66d434dd60b66bf62cf145ea122583b88fdc47a361545d" Feb 27 16:24:35 crc kubenswrapper[4751]: E0227 16:24:35.738518 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Feb 27 16:24:36 crc kubenswrapper[4751]: I0227 16:24:36.460664 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:36 crc kubenswrapper[4751]: I0227 16:24:36.742494 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/2.log" Feb 27 16:24:37 crc kubenswrapper[4751]: I0227 16:24:37.460667 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:37 crc kubenswrapper[4751]: I0227 16:24:37.966811 4751 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Feb 27 16:24:37 crc kubenswrapper[4751]: I0227 16:24:37.985586 4751 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Feb 27 16:24:37 crc kubenswrapper[4751]: I0227 16:24:37.997839 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 27 16:24:37 crc kubenswrapper[4751]: I0227 16:24:37.998085 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:37 crc kubenswrapper[4751]: I0227 16:24:37.999769 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:37 crc kubenswrapper[4751]: I0227 16:24:37.999836 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:37 crc kubenswrapper[4751]: I0227 16:24:37.999861 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:38 crc kubenswrapper[4751]: I0227 16:24:38.331710 
4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 27 16:24:38 crc kubenswrapper[4751]: I0227 16:24:38.459639 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:38 crc kubenswrapper[4751]: E0227 16:24:38.592128 4751 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Feb 27 16:24:38 crc kubenswrapper[4751]: I0227 16:24:38.750837 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:38 crc kubenswrapper[4751]: I0227 16:24:38.752332 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:38 crc kubenswrapper[4751]: I0227 16:24:38.752383 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:38 crc kubenswrapper[4751]: I0227 16:24:38.752440 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:39 crc kubenswrapper[4751]: I0227 16:24:39.457544 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:39 crc kubenswrapper[4751]: I0227 16:24:39.737482 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 27 16:24:39 crc kubenswrapper[4751]: I0227 16:24:39.752516 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:39 crc kubenswrapper[4751]: I0227 16:24:39.753431 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:39 crc kubenswrapper[4751]: I0227 16:24:39.753471 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:39 crc kubenswrapper[4751]: I0227 16:24:39.753484 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:40 crc kubenswrapper[4751]: E0227 16:24:40.056278 4751 controller.go:145] "Failed to ensure lease exists, will retry" err="leases.coordination.k8s.io \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s" Feb 27 16:24:40 crc kubenswrapper[4751]: I0227 16:24:40.057364 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:40 crc kubenswrapper[4751]: I0227 16:24:40.059649 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:40 crc kubenswrapper[4751]: I0227 16:24:40.059846 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:40 crc kubenswrapper[4751]: I0227 16:24:40.059976 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:40 crc 
kubenswrapper[4751]: I0227 16:24:40.060127 4751 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 27 16:24:40 crc kubenswrapper[4751]: E0227 16:24:40.066451 4751 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes is forbidden: User \"system:anonymous\" cannot create resource \"nodes\" in API group \"\" at the cluster scope" node="crc" Feb 27 16:24:40 crc kubenswrapper[4751]: I0227 16:24:40.459147 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:40 crc kubenswrapper[4751]: W0227 16:24:40.846685 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: nodes "crc" is forbidden: User "system:anonymous" cannot list resource "nodes" in API group "" at the cluster scope Feb 27 16:24:40 crc kubenswrapper[4751]: E0227 16:24:40.846757 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: nodes \"crc\" is forbidden: User \"system:anonymous\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" Feb 27 16:24:41 crc kubenswrapper[4751]: W0227 16:24:41.243579 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:anonymous" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:41 crc kubenswrapper[4751]: E0227 16:24:41.243641 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:anonymous\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" Feb 27 16:24:41 crc kubenswrapper[4751]: I0227 16:24:41.459150 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:41 crc kubenswrapper[4751]: I0227 16:24:41.834689 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:24:41 crc kubenswrapper[4751]: I0227 16:24:41.835352 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:41 crc kubenswrapper[4751]: I0227 16:24:41.836744 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:41 crc kubenswrapper[4751]: I0227 16:24:41.836795 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:41 crc kubenswrapper[4751]: I0227 16:24:41.836811 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:41 crc kubenswrapper[4751]: I0227 16:24:41.837605 4751 scope.go:117] "RemoveContainer" containerID="93a9178aabebb94c6e66d434dd60b66bf62cf145ea122583b88fdc47a361545d" Feb 27 16:24:41 crc kubenswrapper[4751]: E0227 16:24:41.837856 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" 
with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Feb 27 16:24:42 crc kubenswrapper[4751]: I0227 16:24:42.458327 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:42 crc kubenswrapper[4751]: I0227 16:24:42.582573 4751 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:24:42 crc kubenswrapper[4751]: I0227 16:24:42.760657 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:42 crc kubenswrapper[4751]: I0227 16:24:42.761713 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:42 crc kubenswrapper[4751]: I0227 16:24:42.761752 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:42 crc kubenswrapper[4751]: I0227 16:24:42.761761 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:42 crc kubenswrapper[4751]: I0227 16:24:42.762296 4751 scope.go:117] "RemoveContainer" containerID="93a9178aabebb94c6e66d434dd60b66bf62cf145ea122583b88fdc47a361545d" Feb 27 16:24:42 crc kubenswrapper[4751]: E0227 16:24:42.762476 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Feb 27 16:24:43 crc kubenswrapper[4751]: W0227 16:24:43.409011 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: services is forbidden: User "system:anonymous" cannot list resource "services" in API group "" at the cluster scope Feb 27 16:24:43 crc kubenswrapper[4751]: E0227 16:24:43.409094 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User \"system:anonymous\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" Feb 27 16:24:43 crc kubenswrapper[4751]: I0227 16:24:43.457465 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:44 crc kubenswrapper[4751]: I0227 16:24:44.457017 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:45 crc kubenswrapper[4751]: I0227 16:24:45.458642 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" 
cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:46 crc kubenswrapper[4751]: I0227 16:24:46.458300 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:47 crc kubenswrapper[4751]: E0227 16:24:47.060951 4751 controller.go:145] "Failed to ensure lease exists, will retry" err="leases.coordination.k8s.io \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s" Feb 27 16:24:47 crc kubenswrapper[4751]: I0227 16:24:47.067052 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:47 crc kubenswrapper[4751]: I0227 16:24:47.068684 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:47 crc kubenswrapper[4751]: I0227 16:24:47.068753 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:47 crc kubenswrapper[4751]: I0227 16:24:47.068772 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:47 crc kubenswrapper[4751]: I0227 16:24:47.068811 4751 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 27 16:24:47 crc kubenswrapper[4751]: E0227 16:24:47.074110 4751 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes is forbidden: User \"system:anonymous\" cannot create resource \"nodes\" in API group \"\" at the cluster scope" node="crc" Feb 27 16:24:47 crc kubenswrapper[4751]: I0227 16:24:47.460024 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:48 crc kubenswrapper[4751]: I0227 16:24:48.002813 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 27 16:24:48 crc kubenswrapper[4751]: I0227 16:24:48.003075 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:48 crc kubenswrapper[4751]: I0227 16:24:48.005137 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:48 crc kubenswrapper[4751]: I0227 16:24:48.005293 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:48 crc kubenswrapper[4751]: I0227 16:24:48.005393 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:48 crc kubenswrapper[4751]: I0227 16:24:48.460146 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:48 crc kubenswrapper[4751]: E0227 16:24:48.593255 4751 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Feb 27 16:24:49 crc kubenswrapper[4751]: I0227 16:24:49.459701 
4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:50 crc kubenswrapper[4751]: I0227 16:24:50.459868 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:50 crc kubenswrapper[4751]: W0227 16:24:50.817448 4751 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: runtimeclasses.node.k8s.io is forbidden: User "system:anonymous" cannot list resource "runtimeclasses" in API group "node.k8s.io" at the cluster scope Feb 27 16:24:50 crc kubenswrapper[4751]: E0227 16:24:50.817519 4751 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: runtimeclasses.node.k8s.io is forbidden: User \"system:anonymous\" cannot list resource \"runtimeclasses\" in API group \"node.k8s.io\" at the cluster scope" logger="UnhandledError" Feb 27 16:24:51 crc kubenswrapper[4751]: I0227 16:24:51.461205 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:51 crc kubenswrapper[4751]: I0227 16:24:51.776282 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 27 16:24:51 crc kubenswrapper[4751]: I0227 16:24:51.776512 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:51 crc kubenswrapper[4751]: I0227 16:24:51.777825 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:51 crc kubenswrapper[4751]: I0227 16:24:51.777866 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:51 crc kubenswrapper[4751]: I0227 16:24:51.777882 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:52 crc kubenswrapper[4751]: I0227 16:24:52.461013 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:53 crc kubenswrapper[4751]: I0227 16:24:53.461593 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:54 crc kubenswrapper[4751]: E0227 16:24:54.069484 4751 controller.go:145] "Failed to ensure lease exists, will retry" err="leases.coordination.k8s.io \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s" Feb 27 16:24:54 crc kubenswrapper[4751]: I0227 16:24:54.074465 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:54 
crc kubenswrapper[4751]: I0227 16:24:54.076212 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:54 crc kubenswrapper[4751]: I0227 16:24:54.076260 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:54 crc kubenswrapper[4751]: I0227 16:24:54.076278 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:54 crc kubenswrapper[4751]: I0227 16:24:54.076314 4751 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 27 16:24:54 crc kubenswrapper[4751]: E0227 16:24:54.085436 4751 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes is forbidden: User \"system:anonymous\" cannot create resource \"nodes\" in API group \"\" at the cluster scope" node="crc" Feb 27 16:24:54 crc kubenswrapper[4751]: I0227 16:24:54.458042 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:55 crc kubenswrapper[4751]: I0227 16:24:55.460449 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:56 crc kubenswrapper[4751]: I0227 16:24:56.459966 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:56 crc kubenswrapper[4751]: I0227 16:24:56.520059 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:56 crc kubenswrapper[4751]: I0227 16:24:56.521709 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:56 crc kubenswrapper[4751]: I0227 16:24:56.521770 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:56 crc kubenswrapper[4751]: I0227 16:24:56.521794 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:56 crc kubenswrapper[4751]: I0227 16:24:56.522959 4751 scope.go:117] "RemoveContainer" containerID="93a9178aabebb94c6e66d434dd60b66bf62cf145ea122583b88fdc47a361545d" Feb 27 16:24:56 crc kubenswrapper[4751]: I0227 16:24:56.809094 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/2.log" Feb 27 16:24:56 crc kubenswrapper[4751]: I0227 16:24:56.811960 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631"} Feb 27 16:24:56 crc kubenswrapper[4751]: I0227 16:24:56.812214 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:56 crc kubenswrapper[4751]: I0227 16:24:56.813441 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Feb 27 16:24:56 crc kubenswrapper[4751]: I0227 16:24:56.813494 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:56 crc kubenswrapper[4751]: I0227 16:24:56.813513 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:57 crc kubenswrapper[4751]: I0227 16:24:57.460219 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:58 crc kubenswrapper[4751]: I0227 16:24:58.457552 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:58 crc kubenswrapper[4751]: E0227 16:24:58.594110 4751 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Feb 27 16:24:58 crc kubenswrapper[4751]: I0227 16:24:58.820943 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/3.log" Feb 27 16:24:58 crc kubenswrapper[4751]: I0227 16:24:58.821923 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/2.log" Feb 27 16:24:58 crc kubenswrapper[4751]: I0227 16:24:58.825073 4751 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631" exitCode=255 Feb 27 16:24:58 crc kubenswrapper[4751]: I0227 16:24:58.825129 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631"} Feb 27 16:24:58 crc kubenswrapper[4751]: I0227 16:24:58.825192 4751 scope.go:117] "RemoveContainer" containerID="93a9178aabebb94c6e66d434dd60b66bf62cf145ea122583b88fdc47a361545d" Feb 27 16:24:58 crc kubenswrapper[4751]: I0227 16:24:58.825348 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:24:58 crc kubenswrapper[4751]: I0227 16:24:58.827119 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:24:58 crc kubenswrapper[4751]: I0227 16:24:58.827193 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:24:58 crc kubenswrapper[4751]: I0227 16:24:58.827212 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:24:58 crc kubenswrapper[4751]: I0227 16:24:58.828534 4751 scope.go:117] "RemoveContainer" containerID="cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631" Feb 27 16:24:58 crc kubenswrapper[4751]: E0227 16:24:58.829008 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Feb 27 16:24:59 crc kubenswrapper[4751]: I0227 16:24:59.462233 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:24:59 crc kubenswrapper[4751]: I0227 16:24:59.832000 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/3.log" Feb 27 16:25:00 crc kubenswrapper[4751]: I0227 16:25:00.457212 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:25:01 crc kubenswrapper[4751]: E0227 16:25:01.077084 4751 controller.go:145] "Failed to ensure lease exists, will retry" err="leases.coordination.k8s.io \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s" Feb 27 16:25:01 crc kubenswrapper[4751]: I0227 16:25:01.086210 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:25:01 crc kubenswrapper[4751]: I0227 16:25:01.087803 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:01 crc kubenswrapper[4751]: I0227 16:25:01.087851 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:01 crc kubenswrapper[4751]: I0227 16:25:01.087870 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:01 crc kubenswrapper[4751]: I0227 16:25:01.087901 4751 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 27 16:25:01 crc kubenswrapper[4751]: E0227 16:25:01.095209 4751 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes is forbidden: User \"system:anonymous\" cannot create resource \"nodes\" in API group \"\" at the cluster scope" node="crc" Feb 27 16:25:01 crc kubenswrapper[4751]: I0227 16:25:01.459280 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:25:01 crc kubenswrapper[4751]: I0227 16:25:01.834780 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:25:01 crc kubenswrapper[4751]: I0227 16:25:01.835038 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:25:01 crc kubenswrapper[4751]: I0227 16:25:01.836880 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:01 crc kubenswrapper[4751]: I0227 16:25:01.836934 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:01 crc kubenswrapper[4751]: I0227 16:25:01.836956 4751 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:01 crc kubenswrapper[4751]: I0227 16:25:01.837971 4751 scope.go:117] "RemoveContainer" containerID="cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631" Feb 27 16:25:01 crc kubenswrapper[4751]: E0227 16:25:01.838256 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Feb 27 16:25:02 crc kubenswrapper[4751]: I0227 16:25:02.463215 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:25:02 crc kubenswrapper[4751]: I0227 16:25:02.582337 4751 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:25:02 crc kubenswrapper[4751]: I0227 16:25:02.582639 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:25:02 crc kubenswrapper[4751]: I0227 16:25:02.584137 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:02 crc kubenswrapper[4751]: I0227 16:25:02.584222 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:02 crc kubenswrapper[4751]: I0227 16:25:02.584249 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:02 crc kubenswrapper[4751]: I0227 16:25:02.585383 4751 scope.go:117] "RemoveContainer" containerID="cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631" Feb 27 16:25:02 crc kubenswrapper[4751]: E0227 16:25:02.585987 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Feb 27 16:25:03 crc kubenswrapper[4751]: I0227 16:25:03.456086 4751 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Feb 27 16:25:04 crc kubenswrapper[4751]: I0227 16:25:04.178700 4751 csr.go:261] certificate signing request csr-mqnm4 is approved, waiting to be issued Feb 27 16:25:04 crc kubenswrapper[4751]: I0227 16:25:04.191714 4751 csr.go:257] certificate signing request csr-mqnm4 is issued Feb 27 16:25:04 crc kubenswrapper[4751]: I0227 16:25:04.271701 4751 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Feb 27 16:25:04 crc kubenswrapper[4751]: I0227 16:25:04.306153 4751 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Feb 27 16:25:05 crc kubenswrapper[4751]: I0227 16:25:05.193035 4751 certificate_manager.go:356] 
kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-02-24 05:54:36 +0000 UTC, rotation deadline is 2027-01-16 16:15:02.267489962 +0000 UTC Feb 27 16:25:05 crc kubenswrapper[4751]: I0227 16:25:05.193297 4751 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 7751h49m57.074196262s for next certificate rotation Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.095388 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.097140 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.097187 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.097197 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.097303 4751 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.106829 4751 kubelet_node_status.go:115] "Node was previously registered" node="crc" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.107154 4751 kubelet_node_status.go:79] "Successfully registered node" node="crc" Feb 27 16:25:08 crc kubenswrapper[4751]: E0227 16:25:08.107192 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": node \"crc\" not found" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.110714 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.110767 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.110784 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.110809 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.110830 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:08Z","lastTransitionTime":"2026-02-27T16:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:08 crc kubenswrapper[4751]: E0227 16:25:08.134360 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.147220 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.147291 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.147336 4751 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.147370 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.147392 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:08Z","lastTransitionTime":"2026-02-27T16:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:08 crc kubenswrapper[4751]: E0227 16:25:08.166597 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.177103 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.177171 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.177191 4751 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.177220 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.177242 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:08Z","lastTransitionTime":"2026-02-27T16:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:08 crc kubenswrapper[4751]: E0227 16:25:08.192674 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.201746 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.201894 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.201968 4751 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.202029 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:08 crc kubenswrapper[4751]: I0227 16:25:08.202103 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:08Z","lastTransitionTime":"2026-02-27T16:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:08 crc kubenswrapper[4751]: E0227 16:25:08.212159 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:08 crc kubenswrapper[4751]: E0227 16:25:08.212367 4751 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 27 16:25:08 crc kubenswrapper[4751]: E0227 16:25:08.212431 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:08 crc kubenswrapper[4751]: E0227 16:25:08.313421 4751 
kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:08 crc kubenswrapper[4751]: E0227 16:25:08.413960 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:08 crc kubenswrapper[4751]: E0227 16:25:08.514842 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:08 crc kubenswrapper[4751]: E0227 16:25:08.594810 4751 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Feb 27 16:25:08 crc kubenswrapper[4751]: E0227 16:25:08.615584 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:08 crc kubenswrapper[4751]: E0227 16:25:08.716692 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:08 crc kubenswrapper[4751]: E0227 16:25:08.817244 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:08 crc kubenswrapper[4751]: E0227 16:25:08.918250 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:09 crc kubenswrapper[4751]: E0227 16:25:09.019258 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:09 crc kubenswrapper[4751]: E0227 16:25:09.120445 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:09 crc kubenswrapper[4751]: E0227 16:25:09.221344 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:09 crc kubenswrapper[4751]: E0227 16:25:09.322222 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:09 crc kubenswrapper[4751]: E0227 16:25:09.423342 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:09 crc kubenswrapper[4751]: E0227 16:25:09.524386 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:09 crc kubenswrapper[4751]: E0227 16:25:09.625133 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:09 crc kubenswrapper[4751]: E0227 16:25:09.725248 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:09 crc kubenswrapper[4751]: E0227 16:25:09.826314 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:09 crc kubenswrapper[4751]: E0227 16:25:09.927456 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:10 crc kubenswrapper[4751]: E0227 16:25:10.027646 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:10 crc kubenswrapper[4751]: E0227 16:25:10.128812 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:10 crc kubenswrapper[4751]: E0227 16:25:10.229868 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:10 crc 
kubenswrapper[4751]: E0227 16:25:10.330863 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:10 crc kubenswrapper[4751]: E0227 16:25:10.431552 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:10 crc kubenswrapper[4751]: E0227 16:25:10.532444 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:10 crc kubenswrapper[4751]: E0227 16:25:10.633280 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:10 crc kubenswrapper[4751]: E0227 16:25:10.734110 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:10 crc kubenswrapper[4751]: E0227 16:25:10.835024 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:10 crc kubenswrapper[4751]: E0227 16:25:10.935788 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:11 crc kubenswrapper[4751]: E0227 16:25:11.036729 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:11 crc kubenswrapper[4751]: E0227 16:25:11.137262 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:11 crc kubenswrapper[4751]: E0227 16:25:11.238148 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:11 crc kubenswrapper[4751]: E0227 16:25:11.339105 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:11 crc kubenswrapper[4751]: E0227 16:25:11.440227 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:11 crc kubenswrapper[4751]: E0227 16:25:11.540767 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:11 crc kubenswrapper[4751]: E0227 16:25:11.641831 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:11 crc kubenswrapper[4751]: E0227 16:25:11.742824 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:11 crc kubenswrapper[4751]: E0227 16:25:11.843945 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:11 crc kubenswrapper[4751]: E0227 16:25:11.944245 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:12 crc kubenswrapper[4751]: E0227 16:25:12.045333 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:12 crc kubenswrapper[4751]: E0227 16:25:12.146756 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:12 crc kubenswrapper[4751]: E0227 16:25:12.247392 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:12 crc kubenswrapper[4751]: E0227 16:25:12.348282 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 
27 16:25:12 crc kubenswrapper[4751]: E0227 16:25:12.449209 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:12 crc kubenswrapper[4751]: E0227 16:25:12.549381 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:12 crc kubenswrapper[4751]: E0227 16:25:12.650030 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:12 crc kubenswrapper[4751]: E0227 16:25:12.750819 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:12 crc kubenswrapper[4751]: E0227 16:25:12.851017 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:12 crc kubenswrapper[4751]: E0227 16:25:12.951524 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:13 crc kubenswrapper[4751]: E0227 16:25:13.052610 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:13 crc kubenswrapper[4751]: I0227 16:25:13.104176 4751 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Feb 27 16:25:13 crc kubenswrapper[4751]: E0227 16:25:13.153656 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:13 crc kubenswrapper[4751]: E0227 16:25:13.254860 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:13 crc kubenswrapper[4751]: E0227 16:25:13.355268 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:13 crc kubenswrapper[4751]: E0227 16:25:13.456043 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:13 crc kubenswrapper[4751]: I0227 16:25:13.520275 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:25:13 crc kubenswrapper[4751]: I0227 16:25:13.521797 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:13 crc kubenswrapper[4751]: I0227 16:25:13.521857 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:13 crc kubenswrapper[4751]: I0227 16:25:13.521874 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:13 crc kubenswrapper[4751]: I0227 16:25:13.522801 4751 scope.go:117] "RemoveContainer" containerID="cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631" Feb 27 16:25:13 crc kubenswrapper[4751]: E0227 16:25:13.523081 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Feb 27 16:25:13 crc kubenswrapper[4751]: E0227 16:25:13.557122 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:13 crc 
kubenswrapper[4751]: E0227 16:25:13.658239 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:13 crc kubenswrapper[4751]: I0227 16:25:13.734098 4751 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Feb 27 16:25:13 crc kubenswrapper[4751]: E0227 16:25:13.759288 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:13 crc kubenswrapper[4751]: E0227 16:25:13.860148 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:13 crc kubenswrapper[4751]: E0227 16:25:13.961049 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:14 crc kubenswrapper[4751]: E0227 16:25:14.062060 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:14 crc kubenswrapper[4751]: E0227 16:25:14.162623 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:14 crc kubenswrapper[4751]: E0227 16:25:14.263036 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:14 crc kubenswrapper[4751]: E0227 16:25:14.364058 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:14 crc kubenswrapper[4751]: E0227 16:25:14.464183 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:14 crc kubenswrapper[4751]: E0227 16:25:14.565328 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:14 crc kubenswrapper[4751]: E0227 16:25:14.666458 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:14 crc kubenswrapper[4751]: E0227 16:25:14.766607 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:14 crc kubenswrapper[4751]: E0227 16:25:14.867115 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:14 crc kubenswrapper[4751]: E0227 16:25:14.968257 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:15 crc kubenswrapper[4751]: E0227 16:25:15.069243 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:15 crc kubenswrapper[4751]: E0227 16:25:15.170244 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:15 crc kubenswrapper[4751]: E0227 16:25:15.270443 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:15 crc kubenswrapper[4751]: E0227 16:25:15.370888 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:15 crc kubenswrapper[4751]: E0227 16:25:15.471760 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:15 crc kubenswrapper[4751]: E0227 16:25:15.572317 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 
16:25:15 crc kubenswrapper[4751]: E0227 16:25:15.672736 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:15 crc kubenswrapper[4751]: E0227 16:25:15.773365 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:15 crc kubenswrapper[4751]: E0227 16:25:15.874290 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:15 crc kubenswrapper[4751]: E0227 16:25:15.974600 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:16 crc kubenswrapper[4751]: E0227 16:25:16.075640 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:16 crc kubenswrapper[4751]: E0227 16:25:16.176365 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:16 crc kubenswrapper[4751]: E0227 16:25:16.277462 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:16 crc kubenswrapper[4751]: E0227 16:25:16.378057 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:16 crc kubenswrapper[4751]: E0227 16:25:16.478770 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:16 crc kubenswrapper[4751]: E0227 16:25:16.578882 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:16 crc kubenswrapper[4751]: E0227 16:25:16.679543 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:16 crc kubenswrapper[4751]: E0227 16:25:16.779736 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:16 crc kubenswrapper[4751]: E0227 16:25:16.880474 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:16 crc kubenswrapper[4751]: E0227 16:25:16.980929 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:17 crc kubenswrapper[4751]: E0227 16:25:17.081292 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:17 crc kubenswrapper[4751]: E0227 16:25:17.181667 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:17 crc kubenswrapper[4751]: E0227 16:25:17.282871 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:17 crc kubenswrapper[4751]: E0227 16:25:17.383803 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:17 crc kubenswrapper[4751]: E0227 16:25:17.484811 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:17 crc kubenswrapper[4751]: E0227 16:25:17.585646 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:17 crc kubenswrapper[4751]: E0227 16:25:17.685957 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" 
not found" Feb 27 16:25:17 crc kubenswrapper[4751]: E0227 16:25:17.786930 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:17 crc kubenswrapper[4751]: E0227 16:25:17.888006 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:17 crc kubenswrapper[4751]: E0227 16:25:17.988125 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:18 crc kubenswrapper[4751]: E0227 16:25:18.089242 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:18 crc kubenswrapper[4751]: E0227 16:25:18.189870 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:18 crc kubenswrapper[4751]: E0227 16:25:18.222463 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": node \"crc\" not found" Feb 27 16:25:18 crc kubenswrapper[4751]: I0227 16:25:18.227951 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:18 crc kubenswrapper[4751]: I0227 16:25:18.228025 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:18 crc kubenswrapper[4751]: I0227 16:25:18.228039 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:18 crc kubenswrapper[4751]: I0227 16:25:18.228069 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:18 crc kubenswrapper[4751]: I0227 16:25:18.228090 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:18Z","lastTransitionTime":"2026-02-27T16:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:18 crc kubenswrapper[4751]: E0227 16:25:18.248864 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:18 crc kubenswrapper[4751]: I0227 16:25:18.255025 4751 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:18 crc kubenswrapper[4751]: I0227 16:25:18.255128 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:18 crc kubenswrapper[4751]: I0227 16:25:18.255152 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:18 crc kubenswrapper[4751]: I0227 16:25:18.255186 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:18 crc kubenswrapper[4751]: I0227 16:25:18.255210 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:18Z","lastTransitionTime":"2026-02-27T16:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:18 crc kubenswrapper[4751]: E0227 16:25:18.271550 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:18 crc kubenswrapper[4751]: I0227 16:25:18.276731 4751 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:18 crc kubenswrapper[4751]: I0227 16:25:18.276770 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:18 crc kubenswrapper[4751]: I0227 16:25:18.276786 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:18 crc kubenswrapper[4751]: I0227 16:25:18.276808 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:18 crc kubenswrapper[4751]: I0227 16:25:18.276820 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:18Z","lastTransitionTime":"2026-02-27T16:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:18 crc kubenswrapper[4751]: E0227 16:25:18.290872 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:18 crc kubenswrapper[4751]: I0227 16:25:18.296540 4751 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:18 crc kubenswrapper[4751]: I0227 16:25:18.296599 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:18 crc kubenswrapper[4751]: I0227 16:25:18.296617 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:18 crc kubenswrapper[4751]: I0227 16:25:18.296644 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:18 crc kubenswrapper[4751]: I0227 16:25:18.296665 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:18Z","lastTransitionTime":"2026-02-27T16:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:18 crc kubenswrapper[4751]: E0227 16:25:18.312935 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:18 crc kubenswrapper[4751]: E0227 16:25:18.313580 4751 
kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 27 16:25:18 crc kubenswrapper[4751]: E0227 16:25:18.313744 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:18 crc kubenswrapper[4751]: E0227 16:25:18.414447 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:18 crc kubenswrapper[4751]: E0227 16:25:18.515233 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:18 crc kubenswrapper[4751]: E0227 16:25:18.595621 4751 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Feb 27 16:25:18 crc kubenswrapper[4751]: E0227 16:25:18.615582 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:18 crc kubenswrapper[4751]: E0227 16:25:18.716034 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:18 crc kubenswrapper[4751]: E0227 16:25:18.816926 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:18 crc kubenswrapper[4751]: E0227 16:25:18.917493 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:19 crc kubenswrapper[4751]: E0227 16:25:19.018085 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:19 crc kubenswrapper[4751]: E0227 16:25:19.119291 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:19 crc kubenswrapper[4751]: E0227 16:25:19.220433 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:19 crc kubenswrapper[4751]: E0227 16:25:19.321186 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:19 crc kubenswrapper[4751]: E0227 16:25:19.423018 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:19 crc kubenswrapper[4751]: E0227 16:25:19.524100 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:19 crc kubenswrapper[4751]: E0227 16:25:19.624978 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:19 crc kubenswrapper[4751]: E0227 16:25:19.725298 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:19 crc kubenswrapper[4751]: E0227 16:25:19.826473 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:19 crc kubenswrapper[4751]: I0227 16:25:19.888654 4751 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Feb 27 16:25:19 crc kubenswrapper[4751]: E0227 16:25:19.928091 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:20 crc kubenswrapper[4751]: E0227 16:25:20.028278 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:20 crc 
kubenswrapper[4751]: E0227 16:25:20.129111 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:20 crc kubenswrapper[4751]: E0227 16:25:20.230165 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:20 crc kubenswrapper[4751]: E0227 16:25:20.330445 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:20 crc kubenswrapper[4751]: E0227 16:25:20.430845 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:20 crc kubenswrapper[4751]: E0227 16:25:20.531467 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:20 crc kubenswrapper[4751]: E0227 16:25:20.631646 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:20 crc kubenswrapper[4751]: E0227 16:25:20.732361 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:20 crc kubenswrapper[4751]: E0227 16:25:20.833902 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:20 crc kubenswrapper[4751]: E0227 16:25:20.934812 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:21 crc kubenswrapper[4751]: E0227 16:25:21.035489 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:21 crc kubenswrapper[4751]: E0227 16:25:21.136709 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:21 crc kubenswrapper[4751]: E0227 16:25:21.237675 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:21 crc kubenswrapper[4751]: E0227 16:25:21.339116 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:21 crc kubenswrapper[4751]: E0227 16:25:21.439895 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:21 crc kubenswrapper[4751]: I0227 16:25:21.520664 4751 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 27 16:25:21 crc kubenswrapper[4751]: I0227 16:25:21.522590 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:21 crc kubenswrapper[4751]: I0227 16:25:21.522658 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:21 crc kubenswrapper[4751]: I0227 16:25:21.522675 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:21 crc kubenswrapper[4751]: E0227 16:25:21.540355 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:21 crc kubenswrapper[4751]: E0227 16:25:21.640770 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:21 crc kubenswrapper[4751]: E0227 16:25:21.741696 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 
27 16:25:21 crc kubenswrapper[4751]: E0227 16:25:21.842208 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:21 crc kubenswrapper[4751]: E0227 16:25:21.943216 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:22 crc kubenswrapper[4751]: E0227 16:25:22.044430 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:22 crc kubenswrapper[4751]: E0227 16:25:22.145882 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:22 crc kubenswrapper[4751]: E0227 16:25:22.247009 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:22 crc kubenswrapper[4751]: E0227 16:25:22.348529 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:22 crc kubenswrapper[4751]: E0227 16:25:22.449106 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:22 crc kubenswrapper[4751]: E0227 16:25:22.550166 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:22 crc kubenswrapper[4751]: E0227 16:25:22.650494 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:22 crc kubenswrapper[4751]: E0227 16:25:22.751607 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:22 crc kubenswrapper[4751]: E0227 16:25:22.852345 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:22 crc kubenswrapper[4751]: E0227 16:25:22.952953 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:23 crc kubenswrapper[4751]: E0227 16:25:23.054155 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:23 crc kubenswrapper[4751]: E0227 16:25:23.154733 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:23 crc kubenswrapper[4751]: E0227 16:25:23.255519 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:23 crc kubenswrapper[4751]: E0227 16:25:23.356279 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:23 crc kubenswrapper[4751]: E0227 16:25:23.456510 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:23 crc kubenswrapper[4751]: E0227 16:25:23.557199 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:23 crc kubenswrapper[4751]: E0227 16:25:23.657734 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:23 crc kubenswrapper[4751]: E0227 16:25:23.758761 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:23 crc kubenswrapper[4751]: E0227 16:25:23.858939 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" 
not found" Feb 27 16:25:23 crc kubenswrapper[4751]: E0227 16:25:23.959936 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:24 crc kubenswrapper[4751]: E0227 16:25:24.060907 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:24 crc kubenswrapper[4751]: E0227 16:25:24.161738 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:24 crc kubenswrapper[4751]: E0227 16:25:24.262757 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:24 crc kubenswrapper[4751]: E0227 16:25:24.363864 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:24 crc kubenswrapper[4751]: E0227 16:25:24.464314 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:24 crc kubenswrapper[4751]: E0227 16:25:24.565589 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:24 crc kubenswrapper[4751]: E0227 16:25:24.666022 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:24 crc kubenswrapper[4751]: E0227 16:25:24.767253 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:24 crc kubenswrapper[4751]: E0227 16:25:24.867485 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:24 crc kubenswrapper[4751]: E0227 16:25:24.968130 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:25 crc kubenswrapper[4751]: E0227 16:25:25.068558 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:25 crc kubenswrapper[4751]: E0227 16:25:25.169568 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:25 crc kubenswrapper[4751]: E0227 16:25:25.270877 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:25 crc kubenswrapper[4751]: E0227 16:25:25.372086 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:25 crc kubenswrapper[4751]: E0227 16:25:25.472779 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:25 crc kubenswrapper[4751]: E0227 16:25:25.573269 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:25 crc kubenswrapper[4751]: E0227 16:25:25.673730 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:25 crc kubenswrapper[4751]: E0227 16:25:25.774718 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:25 crc kubenswrapper[4751]: E0227 16:25:25.875141 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:25 crc kubenswrapper[4751]: E0227 16:25:25.975615 4751 kubelet_node_status.go:503] "Error getting the current node from lister" 
err="node \"crc\" not found" Feb 27 16:25:26 crc kubenswrapper[4751]: E0227 16:25:26.076537 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:26 crc kubenswrapper[4751]: E0227 16:25:26.177605 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:26 crc kubenswrapper[4751]: E0227 16:25:26.278610 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:26 crc kubenswrapper[4751]: E0227 16:25:26.379180 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:26 crc kubenswrapper[4751]: E0227 16:25:26.480090 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:26 crc kubenswrapper[4751]: E0227 16:25:26.581016 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:26 crc kubenswrapper[4751]: E0227 16:25:26.681446 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:26 crc kubenswrapper[4751]: E0227 16:25:26.781983 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:26 crc kubenswrapper[4751]: E0227 16:25:26.882607 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:26 crc kubenswrapper[4751]: E0227 16:25:26.983666 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:27 crc kubenswrapper[4751]: E0227 16:25:27.084681 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:27 crc kubenswrapper[4751]: E0227 16:25:27.185212 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:27 crc kubenswrapper[4751]: E0227 16:25:27.286299 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:27 crc kubenswrapper[4751]: E0227 16:25:27.386740 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:27 crc kubenswrapper[4751]: E0227 16:25:27.487276 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:27 crc kubenswrapper[4751]: E0227 16:25:27.587877 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:27 crc kubenswrapper[4751]: E0227 16:25:27.688910 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:27 crc kubenswrapper[4751]: E0227 16:25:27.789943 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:27 crc kubenswrapper[4751]: E0227 16:25:27.890596 4751 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 27 16:25:27 crc kubenswrapper[4751]: I0227 16:25:27.949370 4751 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.000290 4751 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.000985 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.001948 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.002010 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.002038 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:28Z","lastTransitionTime":"2026-02-27T16:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.104856 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.105166 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.105320 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.105463 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.105596 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:28Z","lastTransitionTime":"2026-02-27T16:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.208773 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.209084 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.209211 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.209337 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.209497 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:28Z","lastTransitionTime":"2026-02-27T16:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.313176 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.313993 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.314127 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.314253 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.314364 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:28Z","lastTransitionTime":"2026-02-27T16:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.417709 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.418009 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.418076 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.418152 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.418223 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:28Z","lastTransitionTime":"2026-02-27T16:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.484365 4751 apiserver.go:52] "Watching apiserver" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.492953 4751 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.493299 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-9c6p9","openshift-image-registry/node-ca-w9n9j","openshift-machine-config-operator/machine-config-daemon-rkcdq","openshift-multus/multus-4jc4n","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-node-identity/network-node-identity-vrzqb","openshift-ovn-kubernetes/ovnkube-node-vpxjd","openshift-multus/multus-additional-cni-plugins-zfn22","openshift-multus/network-metrics-daemon-4bnbv","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-operator/iptables-alerter-4ln5h","openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld"] Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.493662 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.493797 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.493912 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.494197 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.494824 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.494988 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.495159 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.495308 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.495668 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.495729 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.495788 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.495900 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.495916 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.495925 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.495936 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.495947 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:28Z","lastTransitionTime":"2026-02-27T16:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.496072 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-zfn22" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.496272 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.496311 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.496357 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-9c6p9" Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.496428 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.497321 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.497453 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-w9n9j" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.497505 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.499195 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.499194 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.500387 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.500543 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.500556 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.501663 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.502309 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.502484 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.502451 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.502612 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.503557 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.503645 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.503676 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.503702 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.503826 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.503896 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.503963 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.504029 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Feb 27 
16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.504103 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.504157 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.504238 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.504346 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.504424 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.506477 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.507807 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.508007 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.508033 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.508198 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.508238 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.508011 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.508370 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.508375 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.508890 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.509518 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.509610 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.509772 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.518891 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.520131 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b
400131a-a657-46e7-ab90-a8b42c88e909\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.523966 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.523996 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.524007 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.524024 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.524038 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:28Z","lastTransitionTime":"2026-02-27T16:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.538873 4751 scope.go:117] "RemoveContainer" containerID="cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631" Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.539170 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.539451 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.539816 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.540640 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.546174 4751 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.546210 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.546222 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.546237 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.546249 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:28Z","lastTransitionTime":"2026-02-27T16:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.553846 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.556276 4751 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.561531 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.562840 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.568909 4751 
status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.569374 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.569397 4751 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.569430 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.569447 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.569459 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:28Z","lastTransitionTime":"2026-02-27T16:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.583003 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.583263 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.586795 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.586821 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.586831 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.586845 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.586857 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:28Z","lastTransitionTime":"2026-02-27T16:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.594857 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.594974 4751 
kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.596342 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.596377 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.596389 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.596429 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.596442 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:28Z","lastTransitionTime":"2026-02-27T16:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.600323 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.609059 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.620009 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.631796 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.637594 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.637717 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.637827 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.637897 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.637965 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638031 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638081 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). 
InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638097 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638175 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638202 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638226 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638253 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638279 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638304 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638328 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638350 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638372 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638397 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638442 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638469 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638493 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638517 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638541 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638566 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638591 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638618 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638642 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: 
\"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638665 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638689 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638739 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638764 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638785 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638850 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638876 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638900 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638939 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638967 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: 
\"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.638990 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.639015 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.639042 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.639065 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.639088 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.639110 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.639131 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.639150 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.639190 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.639212 4751 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.639234 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.639255 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.639280 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.639301 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.639323 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.639370 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.639363 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.639394 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.639435 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.639461 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.639486 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.639535 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.639563 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.639833 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.640428 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.640535 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.640009 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.640618 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.640764 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.640816 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.640792 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.640842 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.640877 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.640912 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.640923 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). 
InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.640943 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.640972 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.641012 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.641043 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.641070 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.641097 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.641124 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.641326 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.640557 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.641685 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.641848 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.641947 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.642111 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.642142 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.641970 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.642163 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.642678 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.642708 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.642866 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.642939 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.643122 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.643200 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.643654 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.645163 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.645233 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: 
\"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.645264 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.645307 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.645336 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.645364 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.645388 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.645472 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.645537 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.645571 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.645601 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.645631 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.645658 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.645689 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.646483 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.646549 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.646586 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.646625 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.646653 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.646680 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.646716 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.646752 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" 
(UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.646777 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.646829 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.646880 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.646908 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.646936 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.646968 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.646995 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.647018 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.647047 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.647076 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.647103 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.647128 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.647155 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.647193 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.647222 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.648020 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.648719 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.648750 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.648787 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.648809 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" 
(UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.648829 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.648851 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.648872 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.648890 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.648913 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.648931 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.648952 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.648977 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.648995 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649017 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649037 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649055 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649075 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649094 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649132 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649151 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649175 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649200 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649219 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649242 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: 
\"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649263 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649283 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649301 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649326 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649347 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649415 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649445 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649470 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649519 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 27 16:25:28 crc 
kubenswrapper[4751]: I0227 16:25:28.649550 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649606 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649635 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649663 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649694 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649728 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649753 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649783 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649812 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649838 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Feb 27 
16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649868 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649904 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649937 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649966 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650003 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650035 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650068 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650115 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650145 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650172 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: 
\"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650202 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650241 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650268 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650299 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650332 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650367 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650412 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650445 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650488 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650515 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650546 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650574 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650609 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650658 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650696 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650727 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650753 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650790 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650828 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650858 4751 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650890 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650923 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650956 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650981 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651012 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651135 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/fff69b03-aefa-4148-aa53-2d0f3501eafb-system-cni-dir\") pod \"multus-additional-cni-plugins-zfn22\" (UID: \"fff69b03-aefa-4148-aa53-2d0f3501eafb\") " pod="openshift-multus/multus-additional-cni-plugins-zfn22" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651178 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-etc-kubernetes\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651224 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651256 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: 
\"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-run-netns\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651296 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/fff69b03-aefa-4148-aa53-2d0f3501eafb-os-release\") pod \"multus-additional-cni-plugins-zfn22\" (UID: \"fff69b03-aefa-4148-aa53-2d0f3501eafb\") " pod="openshift-multus/multus-additional-cni-plugins-zfn22" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651354 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-cnibin\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651377 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/dc07559e-a5c7-458c-b3ec-646981b798c1-cni-binary-copy\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651434 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5219b077-c7f8-41e9-831b-9b7dae574b9f-host\") pod \"node-ca-w9n9j\" (UID: \"5219b077-c7f8-41e9-831b-9b7dae574b9f\") " pod="openshift-image-registry/node-ca-w9n9j" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651462 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-kubelet\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651492 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-run-ovn\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651523 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f-proxy-tls\") pod \"machine-config-daemon-rkcdq\" (UID: \"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\") " pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651547 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651567 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" 
(UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-os-release\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651583 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-hostroot\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651607 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/44a8652c-fec7-4403-8f80-37bae0514e16-env-overrides\") pod \"ovnkube-control-plane-749d76644c-dvbld\" (UID: \"44a8652c-fec7-4403-8f80-37bae0514e16\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651630 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651655 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-var-lib-openvswitch\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651675 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-run-systemd\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651693 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-cni-bin\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651728 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651789 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/44a8652c-fec7-4403-8f80-37bae0514e16-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-dvbld\" (UID: \"44a8652c-fec7-4403-8f80-37bae0514e16\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" Feb 27 16:25:28 crc 
kubenswrapper[4751]: I0227 16:25:28.651816 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-systemd-units\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651904 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/45a3f89b-11cb-4336-962d-c6835c5f758e-ovnkube-script-lib\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651933 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f-mcd-auth-proxy-config\") pod \"machine-config-daemon-rkcdq\" (UID: \"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\") " pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651989 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/fff69b03-aefa-4148-aa53-2d0f3501eafb-cni-binary-copy\") pod \"multus-additional-cni-plugins-zfn22\" (UID: \"fff69b03-aefa-4148-aa53-2d0f3501eafb\") " pod="openshift-multus/multus-additional-cni-plugins-zfn22" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652013 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/fff69b03-aefa-4148-aa53-2d0f3501eafb-tuning-conf-dir\") pod \"multus-additional-cni-plugins-zfn22\" (UID: \"fff69b03-aefa-4148-aa53-2d0f3501eafb\") " pod="openshift-multus/multus-additional-cni-plugins-zfn22" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652073 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6x8p6\" (UniqueName: \"kubernetes.io/projected/5219b077-c7f8-41e9-831b-9b7dae574b9f-kube-api-access-6x8p6\") pod \"node-ca-w9n9j\" (UID: \"5219b077-c7f8-41e9-831b-9b7dae574b9f\") " pod="openshift-image-registry/node-ca-w9n9j" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652098 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652153 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652177 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: 
\"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652296 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jp49\" (UniqueName: \"kubernetes.io/projected/7da183a7-dcda-4e22-b135-b1ef0d593811-kube-api-access-7jp49\") pod \"network-metrics-daemon-4bnbv\" (UID: \"7da183a7-dcda-4e22-b135-b1ef0d593811\") " pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652316 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652334 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-system-cni-dir\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652356 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-host-var-lib-cni-bin\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652375 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25lx5\" (UniqueName: \"kubernetes.io/projected/44a8652c-fec7-4403-8f80-37bae0514e16-kube-api-access-25lx5\") pod \"ovnkube-control-plane-749d76644c-dvbld\" (UID: \"44a8652c-fec7-4403-8f80-37bae0514e16\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652458 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f-rootfs\") pod \"machine-config-daemon-rkcdq\" (UID: \"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\") " pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652481 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/fff69b03-aefa-4148-aa53-2d0f3501eafb-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-zfn22\" (UID: \"fff69b03-aefa-4148-aa53-2d0f3501eafb\") " pod="openshift-multus/multus-additional-cni-plugins-zfn22" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652502 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4mlg4\" (UniqueName: \"kubernetes.io/projected/fff69b03-aefa-4148-aa53-2d0f3501eafb-kube-api-access-4mlg4\") pod \"multus-additional-cni-plugins-zfn22\" (UID: 
\"fff69b03-aefa-4148-aa53-2d0f3501eafb\") " pod="openshift-multus/multus-additional-cni-plugins-zfn22" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652543 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-host-var-lib-cni-multus\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652562 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-cni-netd\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652588 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652613 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652691 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-slash\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652745 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-node-log\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652767 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-host-run-multus-certs\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652798 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/5219b077-c7f8-41e9-831b-9b7dae574b9f-serviceca\") pod \"node-ca-w9n9j\" (UID: \"5219b077-c7f8-41e9-831b-9b7dae574b9f\") " pod="openshift-image-registry/node-ca-w9n9j" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652817 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-run-openvswitch\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652870 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652891 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-host-run-netns\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652959 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/ac07f9c0-4eff-4c84-8020-ae183619eae7-hosts-file\") pod \"node-resolver-9c6p9\" (UID: \"ac07f9c0-4eff-4c84-8020-ae183619eae7\") " pod="openshift-dns/node-resolver-9c6p9" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.642065 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.642797 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.654798 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.642870 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.642866 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.643420 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.643918 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.644065 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.644465 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.644512 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.644642 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.648110 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.648496 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.648534 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.648550 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.648791 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.648812 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.648977 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.648999 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649308 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649344 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649549 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649859 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649891 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.649893 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650004 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650315 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650316 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650328 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650331 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650370 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650359 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650719 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650903 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.650922 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). 
InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651093 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651464 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651552 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651680 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.651965 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652162 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652346 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652560 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). 
InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652666 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652810 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.652993 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.653071 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.653189 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.653349 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.653341 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.653349 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.653427 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.653801 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.653866 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.654039 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.654123 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.654249 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.654726 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-02-27 16:25:29.154673034 +0000 UTC m=+91.301687501 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.655234 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.656467 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.656597 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.656182 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.657128 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.657302 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.657874 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.657967 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.657988 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.658011 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.658060 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.658252 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.658277 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.658420 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.659057 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.659114 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.659268 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.659482 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.659596 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.659680 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.659695 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.659699 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.660762 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.660792 4751 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.660604 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.661651 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.661725 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.662146 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.662212 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.662239 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.662250 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.662300 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.662597 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.662645 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.662666 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.662703 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.662910 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). 
InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.663038 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.663041 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.663003 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.663088 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.663121 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.663183 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.663193 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.663189 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.663457 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.663554 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.663645 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.663811 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.663882 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.663897 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.663971 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.664039 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.664341 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.664385 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.664562 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.664617 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.664994 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.665031 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.665088 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.665097 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.665125 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.665467 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.665606 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.665692 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.665877 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.665934 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.666002 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.665997 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.665980 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.666105 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.666115 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.666238 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.666244 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.666512 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.666642 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.666739 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.667448 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.667536 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.658365 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.667631 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.667713 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.667804 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.667865 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.668010 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.668251 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.668272 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). 
InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.669169 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.669225 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.658981 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.669321 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.669625 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nm8jw\" (UniqueName: \"kubernetes.io/projected/45a3f89b-11cb-4336-962d-c6835c5f758e-kube-api-access-nm8jw\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.669678 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-host-run-k8s-cni-cncf-io\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.669695 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.669706 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/dc07559e-a5c7-458c-b3ec-646981b798c1-multus-daemon-config\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.669750 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/45a3f89b-11cb-4336-962d-c6835c5f758e-ovnkube-config\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.669763 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.669775 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fnvnb\" (UniqueName: \"kubernetes.io/projected/d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f-kube-api-access-fnvnb\") pod \"machine-config-daemon-rkcdq\" (UID: \"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\") " pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.669787 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.669800 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/fff69b03-aefa-4148-aa53-2d0f3501eafb-cnibin\") pod \"multus-additional-cni-plugins-zfn22\" (UID: \"fff69b03-aefa-4148-aa53-2d0f3501eafb\") " pod="openshift-multus/multus-additional-cni-plugins-zfn22" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.670232 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/45a3f89b-11cb-4336-962d-c6835c5f758e-ovn-node-metrics-cert\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.670279 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-etc-openvswitch\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.670333 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-multus-cni-dir\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.670367 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-multus-socket-dir-parent\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.670427 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-host-var-lib-kubelet\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.670443 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.670461 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-multus-conf-dir\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.670517 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnxq2\" (UniqueName: \"kubernetes.io/projected/dc07559e-a5c7-458c-b3ec-646981b798c1-kube-api-access-xnxq2\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.670578 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qdqf\" (UniqueName: \"kubernetes.io/projected/ac07f9c0-4eff-4c84-8020-ae183619eae7-kube-api-access-4qdqf\") pod \"node-resolver-9c6p9\" (UID: \"ac07f9c0-4eff-4c84-8020-ae183619eae7\") " pod="openshift-dns/node-resolver-9c6p9" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.670638 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.670682 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.669106 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.670697 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.670768 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs\") pod \"network-metrics-daemon-4bnbv\" (UID: \"7da183a7-dcda-4e22-b135-b1ef0d593811\") " pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.670786 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.670831 4751 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.670904 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-log-socket\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.671161 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.671269 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.671519 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.671606 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.672141 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.672352 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.672835 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.673064 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.673297 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.673692 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:29.170935996 +0000 UTC m=+91.317950533 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.673771 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.673796 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.673824 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/44a8652c-fec7-4403-8f80-37bae0514e16-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-dvbld\" (UID: \"44a8652c-fec7-4403-8f80-37bae0514e16\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.673869 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-run-ovn-kubernetes\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.673904 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/45a3f89b-11cb-4336-962d-c6835c5f758e-env-overrides\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.673938 4751 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.674007 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:29.173991029 +0000 UTC m=+91.321005476 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674123 4751 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674159 4751 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674198 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674229 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674261 4751 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674311 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674321 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674599 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674636 4751 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674666 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674685 4751 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674707 4751 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674728 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674748 4751 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674766 4751 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674784 4751 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674802 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674821 4751 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674839 4751 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" 
(UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674857 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674876 4751 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674893 4751 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674910 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674929 4751 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674946 4751 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674964 4751 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.674982 4751 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675000 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675019 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675036 4751 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675053 4751 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675070 4751 reconciler_common.go:293] "Volume detached for volume 
\"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675089 4751 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675106 4751 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675127 4751 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675145 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675162 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675207 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675225 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675242 4751 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675260 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675277 4751 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675295 4751 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675312 4751 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675364 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: 
\"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675381 4751 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675397 4751 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675441 4751 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675460 4751 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675478 4751 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675496 4751 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675514 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675533 4751 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675552 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675570 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675587 4751 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675604 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675621 4751 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: 
\"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675639 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675656 4751 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675673 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675691 4751 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675709 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675727 4751 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675744 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675762 4751 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675779 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675796 4751 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675814 4751 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675832 4751 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675850 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: 
\"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675867 4751 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675884 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675902 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675919 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675943 4751 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675966 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.675986 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676005 4751 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676024 4751 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676042 4751 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676066 4751 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676086 4751 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676105 4751 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: 
\"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676124 4751 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676143 4751 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676162 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676182 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676201 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676220 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676239 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676258 4751 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676278 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676300 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676321 4751 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676342 4751 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676363 4751 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" 
(UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676384 4751 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676425 4751 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676444 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676649 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676679 4751 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676697 4751 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676716 4751 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676733 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676750 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676767 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676785 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676801 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676818 4751 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: 
\"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676837 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676854 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676871 4751 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676888 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676907 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.676924 4751 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677111 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677139 4751 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677159 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677181 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677200 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677216 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677233 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: 
\"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677252 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677269 4751 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677285 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677302 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677319 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677338 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677354 4751 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677372 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677390 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677432 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677449 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677465 4751 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677481 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677519 4751 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677541 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677563 4751 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677582 4751 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677598 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677615 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677633 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677653 4751 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677670 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677679 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677690 4751 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677753 4751 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677775 4751 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677794 4751 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677812 4751 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677827 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677844 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677865 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677885 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677904 4751 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677923 4751 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677942 4751 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677958 4751 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: 
\"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677975 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.677994 4751 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.678011 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.678027 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.678045 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.678061 4751 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.678076 4751 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.678092 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.678110 4751 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.687937 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.687959 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.688087 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.688107 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.688174 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.689175 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.689445 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.689545 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.689578 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.689698 4751 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.689813 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:29.189791088 +0000 UTC m=+91.336805545 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.689987 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.690179 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.690415 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.690729 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.693213 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.693689 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.693826 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.693849 4751 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.694035 4751 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:29.194016762 +0000 UTC m=+91.341031219 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.694272 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.695756 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.697767 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.698233 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.699172 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.699249 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.699388 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.700451 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.700994 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.701376 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.702887 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.702913 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.702922 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.702938 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.702947 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:28Z","lastTransitionTime":"2026-02-27T16:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.703367 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.706622 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.715816 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.721960 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.728558 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.730221 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.741852 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f89
45c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed 
container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.742948 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.750424 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.762545 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plu
gin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 
27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.775696 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\
"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubern
etes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.778637 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/44a8652c-fec7-4403-8f80-37bae0514e16-env-overrides\") pod \"ovnkube-control-plane-749d76644c-dvbld\" (UID: \"44a8652c-fec7-4403-8f80-37bae0514e16\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.778676 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-var-lib-openvswitch\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.778692 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-os-release\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.778706 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-hostroot\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.778720 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-run-systemd\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.778734 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/44a8652c-fec7-4403-8f80-37bae0514e16-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-dvbld\" (UID: \"44a8652c-fec7-4403-8f80-37bae0514e16\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.778752 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" 
(UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-cni-bin\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.778766 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.778782 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/fff69b03-aefa-4148-aa53-2d0f3501eafb-cni-binary-copy\") pod \"multus-additional-cni-plugins-zfn22\" (UID: \"fff69b03-aefa-4148-aa53-2d0f3501eafb\") " pod="openshift-multus/multus-additional-cni-plugins-zfn22" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.778795 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-systemd-units\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.778808 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/45a3f89b-11cb-4336-962d-c6835c5f758e-ovnkube-script-lib\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.778821 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f-mcd-auth-proxy-config\") pod \"machine-config-daemon-rkcdq\" (UID: \"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\") " pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.778813 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-var-lib-openvswitch\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.778837 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6x8p6\" (UniqueName: \"kubernetes.io/projected/5219b077-c7f8-41e9-831b-9b7dae574b9f-kube-api-access-6x8p6\") pod \"node-ca-w9n9j\" (UID: \"5219b077-c7f8-41e9-831b-9b7dae574b9f\") " pod="openshift-image-registry/node-ca-w9n9j" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.778920 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.778963 4751 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/fff69b03-aefa-4148-aa53-2d0f3501eafb-tuning-conf-dir\") pod \"multus-additional-cni-plugins-zfn22\" (UID: \"fff69b03-aefa-4148-aa53-2d0f3501eafb\") " pod="openshift-multus/multus-additional-cni-plugins-zfn22" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.778990 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jp49\" (UniqueName: \"kubernetes.io/projected/7da183a7-dcda-4e22-b135-b1ef0d593811-kube-api-access-7jp49\") pod \"network-metrics-daemon-4bnbv\" (UID: \"7da183a7-dcda-4e22-b135-b1ef0d593811\") " pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779015 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25lx5\" (UniqueName: \"kubernetes.io/projected/44a8652c-fec7-4403-8f80-37bae0514e16-kube-api-access-25lx5\") pod \"ovnkube-control-plane-749d76644c-dvbld\" (UID: \"44a8652c-fec7-4403-8f80-37bae0514e16\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779040 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f-rootfs\") pod \"machine-config-daemon-rkcdq\" (UID: \"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\") " pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779048 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-os-release\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779061 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/fff69b03-aefa-4148-aa53-2d0f3501eafb-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-zfn22\" (UID: \"fff69b03-aefa-4148-aa53-2d0f3501eafb\") " pod="openshift-multus/multus-additional-cni-plugins-zfn22" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779078 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-hostroot\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779084 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779098 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-run-systemd\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779106 4751 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-system-cni-dir\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779127 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-host-var-lib-cni-bin\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779150 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-cni-netd\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779175 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4mlg4\" (UniqueName: \"kubernetes.io/projected/fff69b03-aefa-4148-aa53-2d0f3501eafb-kube-api-access-4mlg4\") pod \"multus-additional-cni-plugins-zfn22\" (UID: \"fff69b03-aefa-4148-aa53-2d0f3501eafb\") " pod="openshift-multus/multus-additional-cni-plugins-zfn22" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779198 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-host-var-lib-cni-multus\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779224 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-slash\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779242 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-node-log\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779278 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-host-run-multus-certs\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779297 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/5219b077-c7f8-41e9-831b-9b7dae574b9f-serviceca\") pod \"node-ca-w9n9j\" (UID: \"5219b077-c7f8-41e9-831b-9b7dae574b9f\") " pod="openshift-image-registry/node-ca-w9n9j" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779340 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-run-openvswitch\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779362 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nm8jw\" (UniqueName: \"kubernetes.io/projected/45a3f89b-11cb-4336-962d-c6835c5f758e-kube-api-access-nm8jw\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779387 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-host-run-netns\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779426 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/ac07f9c0-4eff-4c84-8020-ae183619eae7-hosts-file\") pod \"node-resolver-9c6p9\" (UID: \"ac07f9c0-4eff-4c84-8020-ae183619eae7\") " pod="openshift-dns/node-resolver-9c6p9" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779450 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fnvnb\" (UniqueName: \"kubernetes.io/projected/d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f-kube-api-access-fnvnb\") pod \"machine-config-daemon-rkcdq\" (UID: \"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\") " pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779469 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/fff69b03-aefa-4148-aa53-2d0f3501eafb-cnibin\") pod \"multus-additional-cni-plugins-zfn22\" (UID: \"fff69b03-aefa-4148-aa53-2d0f3501eafb\") " pod="openshift-multus/multus-additional-cni-plugins-zfn22" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779490 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-host-run-k8s-cni-cncf-io\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779510 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/dc07559e-a5c7-458c-b3ec-646981b798c1-multus-daemon-config\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779530 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/45a3f89b-11cb-4336-962d-c6835c5f758e-ovnkube-config\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779560 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-etc-openvswitch\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779582 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/45a3f89b-11cb-4336-962d-c6835c5f758e-ovn-node-metrics-cert\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779604 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-multus-conf-dir\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779615 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-host-var-lib-cni-multus\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779670 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-slash\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779697 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/44a8652c-fec7-4403-8f80-37bae0514e16-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-dvbld\" (UID: \"44a8652c-fec7-4403-8f80-37bae0514e16\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779706 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-node-log\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779740 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-host-run-multus-certs\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779750 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-systemd-units\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.780517 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/45a3f89b-11cb-4336-962d-c6835c5f758e-ovnkube-script-lib\") pod \"ovnkube-node-vpxjd\" (UID: 
\"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.780578 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/44a8652c-fec7-4403-8f80-37bae0514e16-env-overrides\") pod \"ovnkube-control-plane-749d76644c-dvbld\" (UID: \"44a8652c-fec7-4403-8f80-37bae0514e16\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.780651 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-cni-bin\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.780716 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/fff69b03-aefa-4148-aa53-2d0f3501eafb-cnibin\") pod \"multus-additional-cni-plugins-zfn22\" (UID: \"fff69b03-aefa-4148-aa53-2d0f3501eafb\") " pod="openshift-multus/multus-additional-cni-plugins-zfn22" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.780764 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-run-openvswitch\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.781047 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-host-run-netns\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.781104 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/ac07f9c0-4eff-4c84-8020-ae183619eae7-hosts-file\") pod \"node-resolver-9c6p9\" (UID: \"ac07f9c0-4eff-4c84-8020-ae183619eae7\") " pod="openshift-dns/node-resolver-9c6p9" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.781300 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.781624 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-host-run-k8s-cni-cncf-io\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.781876 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-host-var-lib-cni-bin\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc 
kubenswrapper[4751]: I0227 16:25:28.781893 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/fff69b03-aefa-4148-aa53-2d0f3501eafb-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-zfn22\" (UID: \"fff69b03-aefa-4148-aa53-2d0f3501eafb\") " pod="openshift-multus/multus-additional-cni-plugins-zfn22" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.781981 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.782020 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.782155 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-system-cni-dir\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.782325 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f-mcd-auth-proxy-config\") pod \"machine-config-daemon-rkcdq\" (UID: \"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\") " pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.779624 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnxq2\" (UniqueName: \"kubernetes.io/projected/dc07559e-a5c7-458c-b3ec-646981b798c1-kube-api-access-xnxq2\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.782393 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qdqf\" (UniqueName: \"kubernetes.io/projected/ac07f9c0-4eff-4c84-8020-ae183619eae7-kube-api-access-4qdqf\") pod \"node-resolver-9c6p9\" (UID: \"ac07f9c0-4eff-4c84-8020-ae183619eae7\") " pod="openshift-dns/node-resolver-9c6p9" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.782465 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-cni-netd\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.782497 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-etc-openvswitch\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.783174 4751 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.783557 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/fff69b03-aefa-4148-aa53-2d0f3501eafb-cni-binary-copy\") pod \"multus-additional-cni-plugins-zfn22\" (UID: \"fff69b03-aefa-4148-aa53-2d0f3501eafb\") " pod="openshift-multus/multus-additional-cni-plugins-zfn22" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.783475 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f-rootfs\") pod \"machine-config-daemon-rkcdq\" (UID: \"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\") " pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.783512 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-multus-conf-dir\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.783367 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/fff69b03-aefa-4148-aa53-2d0f3501eafb-tuning-conf-dir\") pod \"multus-additional-cni-plugins-zfn22\" (UID: \"fff69b03-aefa-4148-aa53-2d0f3501eafb\") " pod="openshift-multus/multus-additional-cni-plugins-zfn22" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.784525 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-multus-cni-dir\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.784606 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-multus-socket-dir-parent\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.784639 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-host-var-lib-kubelet\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.784661 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-log-socket\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.784811 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs\") pod \"network-metrics-daemon-4bnbv\" (UID: \"7da183a7-dcda-4e22-b135-b1ef0d593811\") " pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.784874 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: 
\"kubernetes.io/configmap/45a3f89b-11cb-4336-962d-c6835c5f758e-env-overrides\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.784921 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/44a8652c-fec7-4403-8f80-37bae0514e16-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-dvbld\" (UID: \"44a8652c-fec7-4403-8f80-37bae0514e16\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.784953 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-run-ovn-kubernetes\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.784982 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-run-netns\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.785012 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-multus-cni-dir\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.785046 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/fff69b03-aefa-4148-aa53-2d0f3501eafb-system-cni-dir\") pod \"multus-additional-cni-plugins-zfn22\" (UID: \"fff69b03-aefa-4148-aa53-2d0f3501eafb\") " pod="openshift-multus/multus-additional-cni-plugins-zfn22" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.785052 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-log-socket\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.785011 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/fff69b03-aefa-4148-aa53-2d0f3501eafb-system-cni-dir\") pod \"multus-additional-cni-plugins-zfn22\" (UID: \"fff69b03-aefa-4148-aa53-2d0f3501eafb\") " pod="openshift-multus/multus-additional-cni-plugins-zfn22" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.785099 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-etc-kubernetes\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.785102 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: 
\"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-multus-socket-dir-parent\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.785123 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/dc07559e-a5c7-458c-b3ec-646981b798c1-cni-binary-copy\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.785145 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5219b077-c7f8-41e9-831b-9b7dae574b9f-host\") pod \"node-ca-w9n9j\" (UID: \"5219b077-c7f8-41e9-831b-9b7dae574b9f\") " pod="openshift-image-registry/node-ca-w9n9j" Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.785166 4751 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.785187 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-kubelet\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.785207 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs podName:7da183a7-dcda-4e22-b135-b1ef0d593811 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:29.285194937 +0000 UTC m=+91.432209514 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs") pod "network-metrics-daemon-4bnbv" (UID: "7da183a7-dcda-4e22-b135-b1ef0d593811") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.785226 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-etc-kubernetes\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.785670 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/45a3f89b-11cb-4336-962d-c6835c5f758e-env-overrides\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.785890 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/dc07559e-a5c7-458c-b3ec-646981b798c1-cni-binary-copy\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.785947 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5219b077-c7f8-41e9-831b-9b7dae574b9f-host\") pod \"node-ca-w9n9j\" (UID: \"5219b077-c7f8-41e9-831b-9b7dae574b9f\") " pod="openshift-image-registry/node-ca-w9n9j" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.785982 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-run-ovn-kubernetes\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.785984 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-run-netns\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.785166 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-kubelet\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.786036 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-run-ovn\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.786063 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/fff69b03-aefa-4148-aa53-2d0f3501eafb-os-release\") pod \"multus-additional-cni-plugins-zfn22\" (UID: 
\"fff69b03-aefa-4148-aa53-2d0f3501eafb\") " pod="openshift-multus/multus-additional-cni-plugins-zfn22" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.786088 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-cnibin\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.786110 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f-proxy-tls\") pod \"machine-config-daemon-rkcdq\" (UID: \"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\") " pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.787652 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.787680 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.787696 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.787742 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.787757 4751 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.787770 4751 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.787784 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.787795 4751 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.787808 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.787821 4751 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath 
\"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.787835 4751 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.787849 4751 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.787862 4751 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.787874 4751 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.787885 4751 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.787897 4751 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.787910 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.787922 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.787934 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.787947 4751 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.787961 4751 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.787975 4751 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.787992 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: 
\"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.788005 4751 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.788018 4751 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.788032 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.788044 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.788055 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.786276 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-cnibin\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.786172 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-run-ovn\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.784956 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/dc07559e-a5c7-458c-b3ec-646981b798c1-host-var-lib-kubelet\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.786232 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/fff69b03-aefa-4148-aa53-2d0f3501eafb-os-release\") pod \"multus-additional-cni-plugins-zfn22\" (UID: \"fff69b03-aefa-4148-aa53-2d0f3501eafb\") " pod="openshift-multus/multus-additional-cni-plugins-zfn22" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.789777 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/45a3f89b-11cb-4336-962d-c6835c5f758e-ovnkube-config\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.791309 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f-proxy-tls\") pod 
\"machine-config-daemon-rkcdq\" (UID: \"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\") " pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.792070 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/44a8652c-fec7-4403-8f80-37bae0514e16-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-dvbld\" (UID: \"44a8652c-fec7-4403-8f80-37bae0514e16\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.793859 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/dc07559e-a5c7-458c-b3ec-646981b798c1-multus-daemon-config\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.797962 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nm8jw\" (UniqueName: \"kubernetes.io/projected/45a3f89b-11cb-4336-962d-c6835c5f758e-kube-api-access-nm8jw\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.798002 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/45a3f89b-11cb-4336-962d-c6835c5f758e-ovn-node-metrics-cert\") pod \"ovnkube-node-vpxjd\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.799094 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4mlg4\" (UniqueName: \"kubernetes.io/projected/fff69b03-aefa-4148-aa53-2d0f3501eafb-kube-api-access-4mlg4\") pod \"multus-additional-cni-plugins-zfn22\" (UID: \"fff69b03-aefa-4148-aa53-2d0f3501eafb\") " pod="openshift-multus/multus-additional-cni-plugins-zfn22" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.799779 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/5219b077-c7f8-41e9-831b-9b7dae574b9f-serviceca\") pod \"node-ca-w9n9j\" (UID: \"5219b077-c7f8-41e9-831b-9b7dae574b9f\") " pod="openshift-image-registry/node-ca-w9n9j" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.802147 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.805133 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.805209 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.805281 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.805343 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.805415 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:28Z","lastTransitionTime":"2026-02-27T16:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.807888 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qdqf\" (UniqueName: \"kubernetes.io/projected/ac07f9c0-4eff-4c84-8020-ae183619eae7-kube-api-access-4qdqf\") pod \"node-resolver-9c6p9\" (UID: \"ac07f9c0-4eff-4c84-8020-ae183619eae7\") " pod="openshift-dns/node-resolver-9c6p9" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.808000 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6x8p6\" (UniqueName: \"kubernetes.io/projected/5219b077-c7f8-41e9-831b-9b7dae574b9f-kube-api-access-6x8p6\") pod \"node-ca-w9n9j\" (UID: \"5219b077-c7f8-41e9-831b-9b7dae574b9f\") " pod="openshift-image-registry/node-ca-w9n9j" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.808128 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnxq2\" (UniqueName: \"kubernetes.io/projected/dc07559e-a5c7-458c-b3ec-646981b798c1-kube-api-access-xnxq2\") pod \"multus-4jc4n\" (UID: \"dc07559e-a5c7-458c-b3ec-646981b798c1\") " pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.809450 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fnvnb\" (UniqueName: \"kubernetes.io/projected/d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f-kube-api-access-fnvnb\") pod \"machine-config-daemon-rkcdq\" (UID: \"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\") " pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.809835 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25lx5\" (UniqueName: \"kubernetes.io/projected/44a8652c-fec7-4403-8f80-37bae0514e16-kube-api-access-25lx5\") pod \"ovnkube-control-plane-749d76644c-dvbld\" (UID: \"44a8652c-fec7-4403-8f80-37bae0514e16\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.811161 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.811891 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jp49\" (UniqueName: \"kubernetes.io/projected/7da183a7-dcda-4e22-b135-b1ef0d593811-kube-api-access-7jp49\") pod \"network-metrics-daemon-4bnbv\" (UID: \"7da183a7-dcda-4e22-b135-b1ef0d593811\") " pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.821781 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.822945 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.832311 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.842735 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.846273 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.854288 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.857943 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.863845 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.874264 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.874422 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-4jc4n" Feb 27 16:25:28 crc kubenswrapper[4751]: W0227 16:25:28.877586 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-a45b4dd1786ee6473881711efbd463160d310a86c7b991afd80fd8dd6df65dd8 WatchSource:0}: Error finding container a45b4dd1786ee6473881711efbd463160d310a86c7b991afd80fd8dd6df65dd8: Status 404 returned error can't find the container with id a45b4dd1786ee6473881711efbd463160d310a86c7b991afd80fd8dd6df65dd8 Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.885896 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-zfn22" Feb 27 16:25:28 crc kubenswrapper[4751]: W0227 16:25:28.888701 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddc07559e_a5c7_458c_b3ec_646981b798c1.slice/crio-01e3e051f27fe95182df74a694a97dd8c99d95e81733e2cd8401a3ab3b021c12 WatchSource:0}: Error finding container 01e3e051f27fe95182df74a694a97dd8c99d95e81733e2cd8401a3ab3b021c12: Status 404 returned error can't find the container with id 01e3e051f27fe95182df74a694a97dd8c99d95e81733e2cd8401a3ab3b021c12 Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.896715 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-9c6p9" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.906975 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.907064 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.907120 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.907206 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.907218 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.907287 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:28Z","lastTransitionTime":"2026-02-27T16:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.908727 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" event={"ID":"fff69b03-aefa-4148-aa53-2d0f3501eafb","Type":"ContainerStarted","Data":"379d80c51141ac9bd651da21ecd97eb99c908b75585be5154c463a3c039dae6b"} Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.912664 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"a45b4dd1786ee6473881711efbd463160d310a86c7b991afd80fd8dd6df65dd8"} Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.913778 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"b0ec17afac064618cb8c99c614deeacaae83621ced06808df02dec2f9b3347c5"} Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.917040 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4jc4n" event={"ID":"dc07559e-a5c7-458c-b3ec-646981b798c1","Type":"ContainerStarted","Data":"01e3e051f27fe95182df74a694a97dd8c99d95e81733e2cd8401a3ab3b021c12"} Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.917158 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.919485 4751 scope.go:117] "RemoveContainer" containerID="cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.919514 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"b97df54bf32b1a57d8ffba37153d50651498e510bc0213b6fe1e485929490166"} Feb 27 16:25:28 crc kubenswrapper[4751]: E0227 16:25:28.919609 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.925987 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-w9n9j" Feb 27 16:25:28 crc kubenswrapper[4751]: I0227 16:25:28.933130 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.011824 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.011849 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.011857 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.011871 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.011879 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:29Z","lastTransitionTime":"2026-02-27T16:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.116072 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.116104 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.116114 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.116128 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.116143 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:29Z","lastTransitionTime":"2026-02-27T16:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.190819 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.190944 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.190974 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.191001 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:25:29 crc kubenswrapper[4751]: E0227 16:25:29.191052 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:25:30.191029322 +0000 UTC m=+92.338043769 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:25:29 crc kubenswrapper[4751]: E0227 16:25:29.191102 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 27 16:25:29 crc kubenswrapper[4751]: E0227 16:25:29.191117 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 27 16:25:29 crc kubenswrapper[4751]: E0227 16:25:29.191121 4751 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 27 16:25:29 crc kubenswrapper[4751]: E0227 16:25:29.191127 4751 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:29 crc kubenswrapper[4751]: E0227 16:25:29.191151 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:30.191145005 +0000 UTC m=+92.338159452 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 27 16:25:29 crc kubenswrapper[4751]: E0227 16:25:29.191164 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:30.191158635 +0000 UTC m=+92.338173082 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:29 crc kubenswrapper[4751]: E0227 16:25:29.191206 4751 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 27 16:25:29 crc kubenswrapper[4751]: E0227 16:25:29.191233 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:30.191221107 +0000 UTC m=+92.338235554 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.220652 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.220689 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.220698 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.220712 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.220724 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:29Z","lastTransitionTime":"2026-02-27T16:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.291804 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.291871 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs\") pod \"network-metrics-daemon-4bnbv\" (UID: \"7da183a7-dcda-4e22-b135-b1ef0d593811\") " pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:29 crc kubenswrapper[4751]: E0227 16:25:29.292006 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 27 16:25:29 crc kubenswrapper[4751]: E0227 16:25:29.292028 4751 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 27 16:25:29 crc kubenswrapper[4751]: E0227 16:25:29.292105 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs podName:7da183a7-dcda-4e22-b135-b1ef0d593811 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:30.292089305 +0000 UTC m=+92.439103762 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs") pod "network-metrics-daemon-4bnbv" (UID: "7da183a7-dcda-4e22-b135-b1ef0d593811") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 27 16:25:29 crc kubenswrapper[4751]: E0227 16:25:29.292031 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 27 16:25:29 crc kubenswrapper[4751]: E0227 16:25:29.292135 4751 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:29 crc kubenswrapper[4751]: E0227 16:25:29.292173 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:30.292164847 +0000 UTC m=+92.439179294 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.322683 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.322726 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.322739 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.322756 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.322767 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:29Z","lastTransitionTime":"2026-02-27T16:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.425028 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.425072 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.425083 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.425099 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.425113 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:29Z","lastTransitionTime":"2026-02-27T16:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.527027 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.527077 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.527091 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.527108 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.527120 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:29Z","lastTransitionTime":"2026-02-27T16:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.629063 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.629108 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.629119 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.629135 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.629146 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:29Z","lastTransitionTime":"2026-02-27T16:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.731671 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.731733 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.731750 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.731775 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.731793 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:29Z","lastTransitionTime":"2026-02-27T16:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.834798 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.834846 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.834863 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.834888 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.834906 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:29Z","lastTransitionTime":"2026-02-27T16:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.924694 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" event={"ID":"44a8652c-fec7-4403-8f80-37bae0514e16","Type":"ContainerStarted","Data":"8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.924745 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" event={"ID":"44a8652c-fec7-4403-8f80-37bae0514e16","Type":"ContainerStarted","Data":"950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.924755 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" event={"ID":"44a8652c-fec7-4403-8f80-37bae0514e16","Type":"ContainerStarted","Data":"cf75ea93061df41a24d4083d388553a2e3e9bf8f9b9b42b39145d017a9c55858"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.926427 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.928009 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-w9n9j" event={"ID":"5219b077-c7f8-41e9-831b-9b7dae574b9f","Type":"ContainerStarted","Data":"b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.928039 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-w9n9j" event={"ID":"5219b077-c7f8-41e9-831b-9b7dae574b9f","Type":"ContainerStarted","Data":"ddaf261c2f4b9ad1e6498a30337ba98f0c717f2fd50567b22e476b76f1b609b6"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.929522 4751 generic.go:334] "Generic (PLEG): container finished" podID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerID="3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de" exitCode=0 Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.929604 4751 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerDied","Data":"3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.929636 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerStarted","Data":"00077b69e6e8ec8364485f5a7131671543403b6040e8e2504d162bbcdceaf0bc"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.931736 4751 generic.go:334] "Generic (PLEG): container finished" podID="fff69b03-aefa-4148-aa53-2d0f3501eafb" containerID="3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a" exitCode=0 Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.931808 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" event={"ID":"fff69b03-aefa-4148-aa53-2d0f3501eafb","Type":"ContainerDied","Data":"3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.936762 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-9c6p9" event={"ID":"ac07f9c0-4eff-4c84-8020-ae183619eae7","Type":"ContainerStarted","Data":"0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.936814 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-9c6p9" event={"ID":"ac07f9c0-4eff-4c84-8020-ae183619eae7","Type":"ContainerStarted","Data":"041507a33fbc1f8d8ed16c4c725695577bc59af12edbb5c0a786c5633f5a6b80"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.938308 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.938389 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.938459 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.938505 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.938530 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:29Z","lastTransitionTime":"2026-02-27T16:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.944352 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4jc4n" event={"ID":"dc07559e-a5c7-458c-b3ec-646981b798c1","Type":"ContainerStarted","Data":"e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.947474 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.947504 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.952607 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerStarted","Data":"614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.952652 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerStarted","Data":"4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.952661 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerStarted","Data":"2542d500474e1ce110ffcf87677a0b2439d38d5544c9e02e2ed6d894531747b5"} Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.962148 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:29Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.982552 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:29Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:29 crc kubenswrapper[4751]: I0227 16:25:29.999065 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:29Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.021524 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},
\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.034330 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.042285 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.042325 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.042333 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.042346 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.042355 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:30Z","lastTransitionTime":"2026-02-27T16:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.049353 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\
\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastSt
ate\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.069113 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.080326 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 
16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.090530 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.105142 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.119967 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.132352 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod 
was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.144607 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.144731 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.144757 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.144768 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.144782 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.144794 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:30Z","lastTransitionTime":"2026-02-27T16:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.158882 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\
\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.173072 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.185030 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.199587 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.201773 4751 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.201878 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.201906 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:30 crc kubenswrapper[4751]: E0227 16:25:30.201944 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:25:32.201922879 +0000 UTC m=+94.348937346 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.201972 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:25:30 crc kubenswrapper[4751]: E0227 16:25:30.201991 4751 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 27 16:25:30 crc kubenswrapper[4751]: E0227 16:25:30.202089 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 27 16:25:30 crc kubenswrapper[4751]: E0227 16:25:30.202106 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 27 16:25:30 crc kubenswrapper[4751]: E0227 16:25:30.202120 4751 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object 
"openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:30 crc kubenswrapper[4751]: E0227 16:25:30.202155 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:32.202145695 +0000 UTC m=+94.349160152 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:30 crc kubenswrapper[4751]: E0227 16:25:30.202500 4751 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 27 16:25:30 crc kubenswrapper[4751]: E0227 16:25:30.202580 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:32.202562886 +0000 UTC m=+94.349577333 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 27 16:25:30 crc kubenswrapper[4751]: E0227 16:25:30.203276 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:32.203211044 +0000 UTC m=+94.350225511 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.216019 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11
\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.230848 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":
\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 
16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.244501 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.247093 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.247134 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.247150 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.247168 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.247176 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:30Z","lastTransitionTime":"2026-02-27T16:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.259989 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets
/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\
\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.283644 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z 
is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.295998 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.302360 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs\") pod 
\"network-metrics-daemon-4bnbv\" (UID: \"7da183a7-dcda-4e22-b135-b1ef0d593811\") " pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.302430 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:25:30 crc kubenswrapper[4751]: E0227 16:25:30.302534 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 27 16:25:30 crc kubenswrapper[4751]: E0227 16:25:30.302550 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 27 16:25:30 crc kubenswrapper[4751]: E0227 16:25:30.302560 4751 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:30 crc kubenswrapper[4751]: E0227 16:25:30.302577 4751 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 27 16:25:30 crc kubenswrapper[4751]: E0227 16:25:30.302603 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:32.30259034 +0000 UTC m=+94.449604787 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:30 crc kubenswrapper[4751]: E0227 16:25:30.302650 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs podName:7da183a7-dcda-4e22-b135-b1ef0d593811 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:32.302630431 +0000 UTC m=+94.449644978 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs") pod "network-metrics-daemon-4bnbv" (UID: "7da183a7-dcda-4e22-b135-b1ef0d593811") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.314197 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.326030 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.337589 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.348982 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.354726 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.354764 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.354774 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.354789 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.354800 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:30Z","lastTransitionTime":"2026-02-27T16:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.363444 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.376248 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.386962 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.457536 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.457576 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.457588 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.457604 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.457617 4751 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:30Z","lastTransitionTime":"2026-02-27T16:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.519586 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.519654 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:25:30 crc kubenswrapper[4751]: E0227 16:25:30.519708 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:25:30 crc kubenswrapper[4751]: E0227 16:25:30.519801 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.519883 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:30 crc kubenswrapper[4751]: E0227 16:25:30.519957 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.520012 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:30 crc kubenswrapper[4751]: E0227 16:25:30.520074 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.524829 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.525745 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.526662 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.527561 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.528360 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.529369 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.530164 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.530932 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.533602 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.534260 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.535539 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.536430 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.537732 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.538968 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" 
path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.539700 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.540911 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.541699 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.542790 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.543605 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.544362 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.545506 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.546223 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.546839 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.548138 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.548802 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.550118 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.550940 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.552026 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" 
path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.552885 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.553976 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.554629 4751 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.554755 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.557423 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.558137 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.558784 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.560634 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.560690 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.560705 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.560722 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.560773 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.561042 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:30Z","lastTransitionTime":"2026-02-27T16:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.562060 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.562782 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.564092 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.564954 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.566041 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.566893 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.568194 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.569545 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.570138 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.570840 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.572004 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.573534 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.574197 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.574848 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Feb 27 16:25:30 
crc kubenswrapper[4751]: I0227 16:25:30.575929 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.576719 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.577928 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.578557 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.663753 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.663968 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.664054 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.664137 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.664255 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:30Z","lastTransitionTime":"2026-02-27T16:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.767114 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.767285 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.767395 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.767516 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.767616 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:30Z","lastTransitionTime":"2026-02-27T16:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.870629 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.870664 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.870674 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.870688 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.870697 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:30Z","lastTransitionTime":"2026-02-27T16:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.957848 4751 generic.go:334] "Generic (PLEG): container finished" podID="fff69b03-aefa-4148-aa53-2d0f3501eafb" containerID="47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1" exitCode=0 Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.957911 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" event={"ID":"fff69b03-aefa-4148-aa53-2d0f3501eafb","Type":"ContainerDied","Data":"47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1"} Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.973528 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerStarted","Data":"684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028"} Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.973569 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerStarted","Data":"787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948"} Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.973581 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerStarted","Data":"58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082"} Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.973701 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.973724 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.973734 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.973746 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.973759 4751 setters.go:603] "Node became not ready" 
node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:30Z","lastTransitionTime":"2026-02-27T16:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.974459 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.16
8.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:30 crc kubenswrapper[4751]: I0227 16:25:30.988548 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.003322 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:31Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.018883 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/r
un/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:31Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.035168 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:31Z 
is after 2025-08-24T17:21:41Z" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.047146 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:31Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.059783 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:31Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.075553 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:31Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.075776 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.075809 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.075822 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.075839 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.075855 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:31Z","lastTransitionTime":"2026-02-27T16:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.094473 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:31Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.109663 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:31Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.123483 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:31Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:31 crc 
kubenswrapper[4751]: I0227 16:25:31.131577 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:31Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.149465 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:31Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.163103 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:31Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.173160 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:31Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.178877 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.178935 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.178948 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.178967 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.178978 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:31Z","lastTransitionTime":"2026-02-27T16:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.281328 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.281667 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.281676 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.281693 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.281703 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:31Z","lastTransitionTime":"2026-02-27T16:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.384865 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.384924 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.384940 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.384963 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.384980 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:31Z","lastTransitionTime":"2026-02-27T16:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.487119 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.487216 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.487234 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.487261 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.487278 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:31Z","lastTransitionTime":"2026-02-27T16:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.590125 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.590183 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.590200 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.590222 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.590238 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:31Z","lastTransitionTime":"2026-02-27T16:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.693639 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.693729 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.693751 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.693780 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.693801 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:31Z","lastTransitionTime":"2026-02-27T16:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.796893 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.796934 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.796942 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.796956 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.796966 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:31Z","lastTransitionTime":"2026-02-27T16:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.900115 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.900172 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.900192 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.900219 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.900237 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:31Z","lastTransitionTime":"2026-02-27T16:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.978256 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b"} Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.982545 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerStarted","Data":"a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354"} Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.982600 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerStarted","Data":"410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc"} Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.982631 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerStarted","Data":"c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d"} Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.985086 4751 generic.go:334] "Generic (PLEG): container finished" podID="fff69b03-aefa-4148-aa53-2d0f3501eafb" containerID="3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad" exitCode=0 Feb 27 16:25:31 crc kubenswrapper[4751]: I0227 16:25:31.985141 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" event={"ID":"fff69b03-aefa-4148-aa53-2d0f3501eafb","Type":"ContainerDied","Data":"3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad"} Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.001307 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:31Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.002922 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.002968 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.002984 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.003006 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.003023 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:32Z","lastTransitionTime":"2026-02-27T16:25:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.021538 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.045929 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":
{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.082227 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z 
is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.101931 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.105203 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.105246 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.105257 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.105275 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.105285 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:32Z","lastTransitionTime":"2026-02-27T16:25:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.122772 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\
\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.140624 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.155475 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.169432 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.185182 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.196485 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.210769 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.214312 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.214356 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.214369 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.214388 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.214422 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:32Z","lastTransitionTime":"2026-02-27T16:25:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.223747 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.223799 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.223968 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.224006 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.224046 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:25:32 crc kubenswrapper[4751]: E0227 16:25:32.224134 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:25:36.224114303 +0000 UTC m=+98.371128760 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:25:32 crc kubenswrapper[4751]: E0227 16:25:32.224197 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 27 16:25:32 crc kubenswrapper[4751]: E0227 16:25:32.224217 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 27 16:25:32 crc kubenswrapper[4751]: E0227 16:25:32.224231 4751 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:32 crc kubenswrapper[4751]: E0227 16:25:32.224238 4751 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 27 16:25:32 crc kubenswrapper[4751]: E0227 16:25:32.224246 4751 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 27 16:25:32 crc kubenswrapper[4751]: E0227 16:25:32.224278 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:36.224262977 +0000 UTC m=+98.371277434 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:32 crc kubenswrapper[4751]: E0227 16:25:32.224298 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:36.224290228 +0000 UTC m=+98.371304685 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 27 16:25:32 crc kubenswrapper[4751]: E0227 16:25:32.224338 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:36.224314688 +0000 UTC m=+98.371329145 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.241368 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.254001 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.266552 4751 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.282940 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",
\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.299194 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z 
is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.312461 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.316361 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.316463 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.316487 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.316516 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.316539 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:32Z","lastTransitionTime":"2026-02-27T16:25:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.325617 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.325707 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs\") pod \"network-metrics-daemon-4bnbv\" (UID: \"7da183a7-dcda-4e22-b135-b1ef0d593811\") " pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:32 crc kubenswrapper[4751]: E0227 16:25:32.325819 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 27 16:25:32 crc kubenswrapper[4751]: E0227 16:25:32.325853 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 27 16:25:32 crc kubenswrapper[4751]: E0227 16:25:32.325867 4751 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:32 crc kubenswrapper[4751]: E0227 16:25:32.325910 4751 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 27 16:25:32 crc kubenswrapper[4751]: E0227 16:25:32.325931 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr 
podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:36.325911866 +0000 UTC m=+98.472926383 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:32 crc kubenswrapper[4751]: E0227 16:25:32.325999 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs podName:7da183a7-dcda-4e22-b135-b1ef0d593811 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:36.325965857 +0000 UTC m=+98.472980344 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs") pod "network-metrics-daemon-4bnbv" (UID: "7da183a7-dcda-4e22-b135-b1ef0d593811") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.339022 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.353249 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.366702 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.380103 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.390922 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.398938 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.413812 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.421804 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.421843 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.421854 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.421872 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.421886 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:32Z","lastTransitionTime":"2026-02-27T16:25:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.428804 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.442577 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.456737 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.466683 4751 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:32Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.520617 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.520678 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.520617 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:25:32 crc kubenswrapper[4751]: E0227 16:25:32.520772 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.520940 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:32 crc kubenswrapper[4751]: E0227 16:25:32.521015 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:25:32 crc kubenswrapper[4751]: E0227 16:25:32.520939 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:25:32 crc kubenswrapper[4751]: E0227 16:25:32.521254 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.525294 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.525420 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.525524 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.525601 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.525662 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:32Z","lastTransitionTime":"2026-02-27T16:25:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.629738 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.629828 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.629846 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.629871 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.629889 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:32Z","lastTransitionTime":"2026-02-27T16:25:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.733171 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.733236 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.733258 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.733286 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.733307 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:32Z","lastTransitionTime":"2026-02-27T16:25:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.836233 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.836293 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.836309 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.836334 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.836351 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:32Z","lastTransitionTime":"2026-02-27T16:25:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.939446 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.939504 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.939524 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.939547 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.939564 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:32Z","lastTransitionTime":"2026-02-27T16:25:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.992026 4751 generic.go:334] "Generic (PLEG): container finished" podID="fff69b03-aefa-4148-aa53-2d0f3501eafb" containerID="4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4" exitCode=0 Feb 27 16:25:32 crc kubenswrapper[4751]: I0227 16:25:32.992085 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" event={"ID":"fff69b03-aefa-4148-aa53-2d0f3501eafb","Type":"ContainerDied","Data":"4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4"} Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.018293 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:33Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.042467 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.042515 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.042596 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.042629 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.042646 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:33Z","lastTransitionTime":"2026-02-27T16:25:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.047087 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:33Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.064431 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"w
aiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:33Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.095611 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:33Z 
is after 2025-08-24T17:21:41Z" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.122608 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:33Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.136375 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:33Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.145628 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.145689 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.145714 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:33 crc 
kubenswrapper[4751]: I0227 16:25:33.145744 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.145767 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:33Z","lastTransitionTime":"2026-02-27T16:25:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.150385 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:33Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.171391 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:33Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.188179 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:33Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.211988 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:33Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.227715 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:33Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.239702 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:33Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.248598 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.248641 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.248655 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.248675 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.248687 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:33Z","lastTransitionTime":"2026-02-27T16:25:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.251879 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:33Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.262827 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:33Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.278694 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:33Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.350786 4751 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.350824 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.350834 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.350848 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.350859 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:33Z","lastTransitionTime":"2026-02-27T16:25:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.453600 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.453637 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.453649 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.453668 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.453680 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:33Z","lastTransitionTime":"2026-02-27T16:25:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.556213 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.556257 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.556271 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.556287 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.556298 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:33Z","lastTransitionTime":"2026-02-27T16:25:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.658889 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.659307 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.659325 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.659351 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.659368 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:33Z","lastTransitionTime":"2026-02-27T16:25:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.762759 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.762833 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.762853 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.762880 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.762901 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:33Z","lastTransitionTime":"2026-02-27T16:25:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.865247 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.865308 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.865327 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.865352 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.865370 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:33Z","lastTransitionTime":"2026-02-27T16:25:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.968814 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.968870 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.968887 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.968912 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:33 crc kubenswrapper[4751]: I0227 16:25:33.968930 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:33Z","lastTransitionTime":"2026-02-27T16:25:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.000952 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerStarted","Data":"f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b"} Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.004491 4751 generic.go:334] "Generic (PLEG): container finished" podID="fff69b03-aefa-4148-aa53-2d0f3501eafb" containerID="7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d" exitCode=0 Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.004553 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" event={"ID":"fff69b03-aefa-4148-aa53-2d0f3501eafb","Type":"ContainerDied","Data":"7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d"} Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.031961 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:34Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.049747 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:34Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.072192 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.072240 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.072252 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.072271 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.072282 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:34Z","lastTransitionTime":"2026-02-27T16:25:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.073734 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ml
g4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:34Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.106945 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:34Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.124691 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/opensh
ift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:34Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.141542 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a
2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:34Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.159523 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:34Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.173814 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:34Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.174808 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.174851 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.174860 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.174874 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.174885 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:34Z","lastTransitionTime":"2026-02-27T16:25:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.187240 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:34Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.196565 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:34Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.206753 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:34Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.218700 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:34Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.233877 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:34Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.247741 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:34Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.259227 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:34Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.277290 4751 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.277326 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.277341 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.277355 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.277365 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:34Z","lastTransitionTime":"2026-02-27T16:25:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.379480 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.379531 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.379546 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.379568 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.379582 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:34Z","lastTransitionTime":"2026-02-27T16:25:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.482578 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.482716 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.482734 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.482760 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.482778 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:34Z","lastTransitionTime":"2026-02-27T16:25:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.520335 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.520375 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:34 crc kubenswrapper[4751]: E0227 16:25:34.520502 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.520348 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.520556 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:34 crc kubenswrapper[4751]: E0227 16:25:34.520898 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:25:34 crc kubenswrapper[4751]: E0227 16:25:34.520994 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:25:34 crc kubenswrapper[4751]: E0227 16:25:34.521031 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.586529 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.586594 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.586612 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.586637 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.586657 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:34Z","lastTransitionTime":"2026-02-27T16:25:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.690370 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.690452 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.690470 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.690495 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.690511 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:34Z","lastTransitionTime":"2026-02-27T16:25:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.793110 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.793184 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.793207 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.793235 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.793257 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:34Z","lastTransitionTime":"2026-02-27T16:25:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.895852 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.895928 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.895956 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.895984 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:34 crc kubenswrapper[4751]: I0227 16:25:34.896004 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:34Z","lastTransitionTime":"2026-02-27T16:25:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.001698 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.001731 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.001743 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.001760 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.001774 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:35Z","lastTransitionTime":"2026-02-27T16:25:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.013111 4751 generic.go:334] "Generic (PLEG): container finished" podID="fff69b03-aefa-4148-aa53-2d0f3501eafb" containerID="1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3" exitCode=0 Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.013201 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" event={"ID":"fff69b03-aefa-4148-aa53-2d0f3501eafb","Type":"ContainerDied","Data":"1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3"} Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.032758 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:35Z 
is after 2025-08-24T17:21:41Z" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.045805 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:35Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.058797 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:35Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.072618 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:35Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.092385 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:35Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.103807 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.103849 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.103862 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.103879 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.103891 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:35Z","lastTransitionTime":"2026-02-27T16:25:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.104646 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:35Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.116605 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:35Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.129753 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:35Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.144615 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:35Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.158208 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:35Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.181219 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:35Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.196277 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:35Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.205528 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.205570 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.205579 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.205594 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.205605 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:35Z","lastTransitionTime":"2026-02-27T16:25:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.207602 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:35Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.218647 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:35Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.246840 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:35Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.308064 4751 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.308097 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.308106 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.308118 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.308126 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:35Z","lastTransitionTime":"2026-02-27T16:25:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.410108 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.410166 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.410192 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.410226 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.410249 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:35Z","lastTransitionTime":"2026-02-27T16:25:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.513132 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.513196 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.513214 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.513273 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.513290 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:35Z","lastTransitionTime":"2026-02-27T16:25:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.616062 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.616550 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.616570 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.616591 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.616608 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:35Z","lastTransitionTime":"2026-02-27T16:25:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.719013 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.719096 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.719121 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.719151 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.719174 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:35Z","lastTransitionTime":"2026-02-27T16:25:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.821955 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.822017 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.822034 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.822062 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.822080 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:35Z","lastTransitionTime":"2026-02-27T16:25:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.925010 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.925059 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.925073 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.925090 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:35 crc kubenswrapper[4751]: I0227 16:25:35.925106 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:35Z","lastTransitionTime":"2026-02-27T16:25:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.024168 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerStarted","Data":"9df823919b7e9b08f5f5dceace3644d3ddec4f10ea2e754d17cabf45bf4267d3"} Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.024450 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.027225 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.027262 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.027274 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.027291 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.027302 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:36Z","lastTransitionTime":"2026-02-27T16:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.029876 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" event={"ID":"fff69b03-aefa-4148-aa53-2d0f3501eafb","Type":"ContainerStarted","Data":"ae804072caefc03aa58e843a4d0ce899c98b2508b1088adecedd725536509a25"} Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.046937 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.059353 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.061985 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.076377 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.104621 4751 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\\\",\\\"image\\\":\\\"quay.io/openshift-
release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\
\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df823919b7e9b08f5f5dceace3644d3ddec4f10ea2e754d17cabf45bf4267d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuberne
tes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.122047 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 
16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.130003 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.130060 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.130071 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.130087 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.130096 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:36Z","lastTransitionTime":"2026-02-27T16:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.147637 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.166796 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.187569 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.205128 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.224190 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc 
kubenswrapper[4751]: I0227 16:25:36.235555 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.235588 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.235599 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.235617 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.235628 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:36Z","lastTransitionTime":"2026-02-27T16:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.238379 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.252697 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.265564 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.268193 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.268338 4751 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.268385 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:36 crc kubenswrapper[4751]: E0227 16:25:36.268497 4751 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 27 16:25:36 crc kubenswrapper[4751]: E0227 16:25:36.268540 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:25:44.268472321 +0000 UTC m=+106.415486808 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:25:36 crc kubenswrapper[4751]: E0227 16:25:36.268626 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:44.268580184 +0000 UTC m=+106.415594671 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 27 16:25:36 crc kubenswrapper[4751]: E0227 16:25:36.268785 4751 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.268856 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:25:36 crc kubenswrapper[4751]: E0227 16:25:36.269042 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:44.269024116 +0000 UTC m=+106.416038593 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 27 16:25:36 crc kubenswrapper[4751]: E0227 16:25:36.269252 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 27 16:25:36 crc kubenswrapper[4751]: E0227 16:25:36.269320 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 27 16:25:36 crc kubenswrapper[4751]: E0227 16:25:36.269341 4751 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:36 crc kubenswrapper[4751]: E0227 16:25:36.269485 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:44.269463638 +0000 UTC m=+106.416478225 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.279535 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.295268 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.307129 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.322696 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.334938 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.338346 4751 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.338388 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.338409 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.338424 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.338438 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:36Z","lastTransitionTime":"2026-02-27T16:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.351046 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.361354 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: E0227 16:25:36.370291 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 27 16:25:36 crc kubenswrapper[4751]: E0227 16:25:36.370336 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 27 16:25:36 crc kubenswrapper[4751]: E0227 16:25:36.370353 4751 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:36 crc kubenswrapper[4751]: E0227 16:25:36.370442 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:44.370421578 +0000 UTC m=+106.517436025 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.370086 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.370954 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs\") pod \"network-metrics-daemon-4bnbv\" (UID: \"7da183a7-dcda-4e22-b135-b1ef0d593811\") " pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:36 crc kubenswrapper[4751]: E0227 16:25:36.371062 4751 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 27 16:25:36 crc kubenswrapper[4751]: E0227 16:25:36.371105 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs podName:7da183a7-dcda-4e22-b135-b1ef0d593811 nodeName:}" failed. No retries permitted until 2026-02-27 16:25:44.371094897 +0000 UTC m=+106.518109344 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs") pod "network-metrics-daemon-4bnbv" (UID: "7da183a7-dcda-4e22-b135-b1ef0d593811") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.375282 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae804072caefc03aa58e843a4d0ce899c98b2508b1088adecedd725536509a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/
var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4
\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.399542 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-ac
cess-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev
@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df823919b7e9b08f5f5dceace3644d3ddec4f10ea2e754d17cabf45bf4267d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-
lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.411310 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 
16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.427392 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.440691 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.440760 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.440772 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.440790 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.440801 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:36Z","lastTransitionTime":"2026-02-27T16:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.441165 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.458390 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.474022 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.489549 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.501271 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.514948 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:36Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.523461 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:25:36 crc kubenswrapper[4751]: E0227 16:25:36.523638 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.524166 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:36 crc kubenswrapper[4751]: E0227 16:25:36.524258 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.524316 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:25:36 crc kubenswrapper[4751]: E0227 16:25:36.524371 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.524436 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:36 crc kubenswrapper[4751]: E0227 16:25:36.524487 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.543597 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.543665 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.543675 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.543688 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.543698 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:36Z","lastTransitionTime":"2026-02-27T16:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.646771 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.646869 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.646893 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.646922 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.646946 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:36Z","lastTransitionTime":"2026-02-27T16:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.749638 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.749691 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.749710 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.749734 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.749750 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:36Z","lastTransitionTime":"2026-02-27T16:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.852261 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.852294 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.852305 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.852322 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.852334 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:36Z","lastTransitionTime":"2026-02-27T16:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.954352 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.954384 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.954397 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.954439 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:36 crc kubenswrapper[4751]: I0227 16:25:36.954450 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:36Z","lastTransitionTime":"2026-02-27T16:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.033150 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.033718 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.057934 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.057963 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.057974 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.057988 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.058000 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:37Z","lastTransitionTime":"2026-02-27T16:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.069733 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.089268 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:37Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.103945 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:37Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.123546 4751 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:37Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.142787 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:37Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.164261 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae804072caefc03aa58e843a4d0ce899c98b2508b1088adecedd725536509a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\"
,\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:37Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.168330 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.168394 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.168432 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.168456 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.168501 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:37Z","lastTransitionTime":"2026-02-27T16:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.194933 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df823919b7e9b08f5f5dceace3644d3ddec4f10ea2e754d17cabf45bf4267d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\
"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:37Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.211592 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:37Z is after 2025-08-24T17:21:41Z" Feb 27 
16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.232296 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:37Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.256516 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:37Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.272263 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.272296 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.272305 4751 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.272318 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.272328 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:37Z","lastTransitionTime":"2026-02-27T16:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.276309 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:37Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.297963 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:37Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.317750 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:37Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.332154 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:37Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.344741 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:37Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.359865 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:37Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.375538 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.375608 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.375626 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.375652 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.375670 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:37Z","lastTransitionTime":"2026-02-27T16:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.478669 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.478733 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.478755 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.478811 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.478836 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:37Z","lastTransitionTime":"2026-02-27T16:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.581863 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.581904 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.581912 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.581927 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.581937 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:37Z","lastTransitionTime":"2026-02-27T16:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.683992 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.684033 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.684041 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.684056 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.684065 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:37Z","lastTransitionTime":"2026-02-27T16:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.786203 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.786238 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.786246 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.786260 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.786268 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:37Z","lastTransitionTime":"2026-02-27T16:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.888339 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.888377 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.888390 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.888423 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.888436 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:37Z","lastTransitionTime":"2026-02-27T16:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.991258 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.991300 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.991311 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.991327 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:37 crc kubenswrapper[4751]: I0227 16:25:37.991338 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:37Z","lastTransitionTime":"2026-02-27T16:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.094384 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.094448 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.094461 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.094483 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.094497 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:38Z","lastTransitionTime":"2026-02-27T16:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.200766 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.200852 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.200875 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.200905 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.200926 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:38Z","lastTransitionTime":"2026-02-27T16:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.304372 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.304482 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.304510 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.304539 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.304571 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:38Z","lastTransitionTime":"2026-02-27T16:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.407603 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.407660 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.407676 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.407696 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.407711 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:38Z","lastTransitionTime":"2026-02-27T16:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.510849 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.510900 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.510913 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.510932 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.510943 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:38Z","lastTransitionTime":"2026-02-27T16:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.520354 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.520377 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.520468 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:25:38 crc kubenswrapper[4751]: E0227 16:25:38.520610 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.520809 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:38 crc kubenswrapper[4751]: E0227 16:25:38.521064 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:25:38 crc kubenswrapper[4751]: E0227 16:25:38.521196 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:25:38 crc kubenswrapper[4751]: E0227 16:25:38.521300 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.538303 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:38Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.552754 4751 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:38Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.575260 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:38Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.599834 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae804072caefc03aa58e843a4d0ce899c98b2508b1088adecedd725536509a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\"
,\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:38Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.617343 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.617387 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.617419 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.617437 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.617449 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:38Z","lastTransitionTime":"2026-02-27T16:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.635036 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df823919b7e9b08f5f5dceace3644d3ddec4f10ea2e754d17cabf45bf4267d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\
"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:38Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.655214 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:38Z is after 2025-08-24T17:21:41Z" Feb 27 
16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.672840 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:38Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.679549 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.679640 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.679663 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.679694 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.679720 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:38Z","lastTransitionTime":"2026-02-27T16:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.690495 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:38Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:38 crc kubenswrapper[4751]: E0227 16:25:38.700255 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:38Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.704020 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.704223 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.704346 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.704500 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.704588 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:38Z","lastTransitionTime":"2026-02-27T16:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.705464 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:38Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:38 crc kubenswrapper[4751]: E0227 16:25:38.720920 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:38Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.725587 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.725862 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.726087 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.726286 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.726488 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:38Z","lastTransitionTime":"2026-02-27T16:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.726492 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubel
et\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:38Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.739952 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:38Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:38 crc kubenswrapper[4751]: E0227 16:25:38.740261 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056
b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951
},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-02-27T16:25:38Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.744795 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.744978 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.745060 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.745148 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.745256 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:38Z","lastTransitionTime":"2026-02-27T16:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.752012 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[
{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:38Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:38 crc kubenswrapper[4751]: E0227 16:25:38.758298 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:38Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.761861 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.762004 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.762021 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.762039 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.762052 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:38Z","lastTransitionTime":"2026-02-27T16:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.767125 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:38Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:38 crc kubenswrapper[4751]: E0227 16:25:38.774681 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:38Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:38 crc kubenswrapper[4751]: E0227 16:25:38.775039 4751 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.776564 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.776684 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.776803 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.776887 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.776961 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:38Z","lastTransitionTime":"2026-02-27T16:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.782628 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:38Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.797388 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:38Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.879962 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.880021 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.880031 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.880052 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.880064 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:38Z","lastTransitionTime":"2026-02-27T16:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.983350 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.983653 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.983780 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.983875 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:38 crc kubenswrapper[4751]: I0227 16:25:38.983955 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:38Z","lastTransitionTime":"2026-02-27T16:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.042367 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vpxjd_45a3f89b-11cb-4336-962d-c6835c5f758e/ovnkube-controller/0.log" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.046637 4751 generic.go:334] "Generic (PLEG): container finished" podID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerID="9df823919b7e9b08f5f5dceace3644d3ddec4f10ea2e754d17cabf45bf4267d3" exitCode=1 Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.046768 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerDied","Data":"9df823919b7e9b08f5f5dceace3644d3ddec4f10ea2e754d17cabf45bf4267d3"} Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.047865 4751 scope.go:117] "RemoveContainer" containerID="9df823919b7e9b08f5f5dceace3644d3ddec4f10ea2e754d17cabf45bf4267d3" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.070162 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:39Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.089624 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.089680 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.089747 4751 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.089776 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.089794 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:39Z","lastTransitionTime":"2026-02-27T16:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.090922 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:39Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.104300 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:39Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.120733 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:39Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.133722 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:39Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.146490 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:39Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.168273 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:39Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.185375 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:39Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.192389 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.192487 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.192511 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.192540 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.192562 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:39Z","lastTransitionTime":"2026-02-27T16:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.199772 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:39Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.219106 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:39Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.236010 4751 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:39Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.253247 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:39Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.276648 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae804072caefc03aa58e843a4d0ce899c98b2508b1088adecedd725536509a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\"
,\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:39Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.295618 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.295676 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.295700 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.295730 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.295752 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:39Z","lastTransitionTime":"2026-02-27T16:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.302539 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df823919b7e9b08f5f5dceace3644d3ddec4f10ea2e754d17cabf45bf4267d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df823919b7e9b08f5f5dceace3644d3ddec4f10ea2e754d17cabf45bf4267d3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"message\\\":\\\"227 16:25:38.530877 6567 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0227 16:25:38.530908 6567 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0227 16:25:38.530918 6567 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0227 16:25:38.531519 6567 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0227 16:25:38.531571 6567 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0227 16:25:38.531581 6567 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0227 16:25:38.531630 6567 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0227 16:25:38.531655 6567 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0227 16:25:38.531676 6567 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0227 16:25:38.531688 6567 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0227 16:25:38.531697 6567 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0227 16:25:38.531706 6567 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0227 16:25:38.531755 6567 factory.go:656] Stopping watch factory\\\\nI0227 16:25:38.531768 6567 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0227 16:25:38.531780 6567 ovnkube.go:599] Stopped ovnkube\\\\nI0227 16:25:38.531782 6567 handler.go:208] Removed *v1.Node event handler 
2\\\\nI02\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:39Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.319334 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursive
ReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:39Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.398741 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.398819 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.398840 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.398873 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.398895 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:39Z","lastTransitionTime":"2026-02-27T16:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.501602 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.501658 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.501674 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.501698 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.501720 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:39Z","lastTransitionTime":"2026-02-27T16:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.604954 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.604994 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.605007 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.605023 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.605035 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:39Z","lastTransitionTime":"2026-02-27T16:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.706840 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.706878 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.706886 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.706901 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.706910 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:39Z","lastTransitionTime":"2026-02-27T16:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.809064 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.809099 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.809111 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.809127 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.809140 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:39Z","lastTransitionTime":"2026-02-27T16:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.911588 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.911639 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.911651 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.911669 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:39 crc kubenswrapper[4751]: I0227 16:25:39.911681 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:39Z","lastTransitionTime":"2026-02-27T16:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.014430 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.014472 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.014484 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.014505 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.014517 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:40Z","lastTransitionTime":"2026-02-27T16:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.053803 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vpxjd_45a3f89b-11cb-4336-962d-c6835c5f758e/ovnkube-controller/0.log" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.058808 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerStarted","Data":"c792566a11b8aee56e4009dc6b61d3f8105241570413f344c0753e65933fa17a"} Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.059328 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.078192 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:40Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.091947 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:40Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.117220 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.117303 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.117324 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.117349 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.117367 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:40Z","lastTransitionTime":"2026-02-27T16:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.117489 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae804072caefc03aa58e843a4d0ce899c98b2508b1088adecedd725536509a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:40Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.153301 4751 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c792566a11b8aee56e4009dc6b61d3f8105241570413f344c0753e65933fa17a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df823919b7e9b08f5f5dceace3644d3ddec4f10ea2e754d17cabf45bf4267d3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"message\\\":\\\"227 16:25:38.530877 6567 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0227 16:25:38.530908 6567 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0227 16:25:38.530918 6567 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0227 16:25:38.531519 6567 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0227 16:25:38.531571 6567 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0227 16:25:38.531581 6567 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0227 16:25:38.531630 6567 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0227 16:25:38.531655 6567 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0227 16:25:38.531676 6567 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0227 16:25:38.531688 6567 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0227 16:25:38.531697 6567 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0227 16:25:38.531706 6567 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0227 16:25:38.531755 6567 factory.go:656] Stopping watch factory\\\\nI0227 16:25:38.531768 6567 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0227 16:25:38.531780 6567 ovnkube.go:599] Stopped ovnkube\\\\nI0227 16:25:38.531782 6567 handler.go:208] Removed *v1.Node event handler 
2\\\\nI02\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{
\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:40Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.170058 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:40Z is after 2025-08-24T17:21:41Z" Feb 27 
16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.181975 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:40Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.203390 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:40Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.216916 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:40Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.219352 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.219373 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.219381 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.219395 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.219420 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:40Z","lastTransitionTime":"2026-02-27T16:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.228247 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:40Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.243253 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:40Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.258323 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:40Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:40 crc 
kubenswrapper[4751]: I0227 16:25:40.269797 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:40Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.284179 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:40Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.300340 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:40Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.315033 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:40Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.321747 4751 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.321781 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.321791 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.321807 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.321820 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:40Z","lastTransitionTime":"2026-02-27T16:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.424386 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.424463 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.424480 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.424500 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.424513 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:40Z","lastTransitionTime":"2026-02-27T16:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.520695 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.520716 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.520740 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:40 crc kubenswrapper[4751]: E0227 16:25:40.520833 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:25:40 crc kubenswrapper[4751]: E0227 16:25:40.520956 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.521020 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:40 crc kubenswrapper[4751]: E0227 16:25:40.521030 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:25:40 crc kubenswrapper[4751]: E0227 16:25:40.521195 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.526134 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.526165 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.526173 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.526188 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.526197 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:40Z","lastTransitionTime":"2026-02-27T16:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.628126 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.628175 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.628187 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.628204 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.628217 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:40Z","lastTransitionTime":"2026-02-27T16:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.730920 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.730999 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.731021 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.731044 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.731064 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:40Z","lastTransitionTime":"2026-02-27T16:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.833963 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.834007 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.834017 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.834036 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.834047 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:40Z","lastTransitionTime":"2026-02-27T16:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.937485 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.937546 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.937565 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.937589 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:40 crc kubenswrapper[4751]: I0227 16:25:40.937608 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:40Z","lastTransitionTime":"2026-02-27T16:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.042214 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.042264 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.042280 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.042302 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.042318 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:41Z","lastTransitionTime":"2026-02-27T16:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.065003 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vpxjd_45a3f89b-11cb-4336-962d-c6835c5f758e/ovnkube-controller/1.log" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.065957 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vpxjd_45a3f89b-11cb-4336-962d-c6835c5f758e/ovnkube-controller/0.log" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.070008 4751 generic.go:334] "Generic (PLEG): container finished" podID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerID="c792566a11b8aee56e4009dc6b61d3f8105241570413f344c0753e65933fa17a" exitCode=1 Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.070075 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerDied","Data":"c792566a11b8aee56e4009dc6b61d3f8105241570413f344c0753e65933fa17a"} Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.070142 4751 scope.go:117] "RemoveContainer" containerID="9df823919b7e9b08f5f5dceace3644d3ddec4f10ea2e754d17cabf45bf4267d3" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.073066 4751 scope.go:117] "RemoveContainer" containerID="c792566a11b8aee56e4009dc6b61d3f8105241570413f344c0753e65933fa17a" Feb 27 16:25:41 crc kubenswrapper[4751]: E0227 16:25:41.073478 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-vpxjd_openshift-ovn-kubernetes(45a3f89b-11cb-4336-962d-c6835c5f758e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.090522 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:41Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.107650 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:41Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.125129 4751 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:41Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.144840 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.144895 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.144911 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.144936 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.144954 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:41Z","lastTransitionTime":"2026-02-27T16:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.147902 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:41Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.167858 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:41Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.190957 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae804072caefc03aa58e843a4d0ce899c98b2508b1088adecedd725536509a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\"
,\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:41Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.223370 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c792566a11b8aee56e4009dc6b61d3f810524157
0413f344c0753e65933fa17a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df823919b7e9b08f5f5dceace3644d3ddec4f10ea2e754d17cabf45bf4267d3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:25:38Z\\\",\\\"message\\\":\\\"227 16:25:38.530877 6567 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0227 16:25:38.530908 6567 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0227 16:25:38.530918 6567 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0227 16:25:38.531519 6567 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0227 16:25:38.531571 6567 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0227 16:25:38.531581 6567 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0227 16:25:38.531630 6567 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0227 16:25:38.531655 6567 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0227 16:25:38.531676 6567 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0227 16:25:38.531688 6567 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0227 16:25:38.531697 6567 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0227 16:25:38.531706 6567 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0227 16:25:38.531755 6567 factory.go:656] Stopping watch factory\\\\nI0227 16:25:38.531768 6567 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0227 16:25:38.531780 6567 ovnkube.go:599] Stopped ovnkube\\\\nI0227 16:25:38.531782 6567 handler.go:208] Removed *v1.Node event handler 2\\\\nI02\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c792566a11b8aee56e4009dc6b61d3f8105241570413f344c0753e65933fa17a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:25:40Z\\\",\\\"message\\\":\\\"712973235162149816) with []\\\\nI0227 16:25:40.169606 6706 address_set.go:302] New(aa6fc2dc-fab0-4812-b9da-809058e4dcf7/default-network-controller:EgressIP:egressip-served-pods:v4:default/a8519615025667110816) with []\\\\nI0227 16:25:40.169622 6706 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI0227 16:25:40.169666 6706 factory.go:1336] Added *v1.Node event handler 7\\\\nI0227 16:25:40.169707 6706 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0227 16:25:40.169722 6706 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0227 16:25:40.169746 6706 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI0227 16:25:40.169765 6706 factory.go:656] Stopping watch factory\\\\nI0227 16:25:40.169782 6706 handler.go:208] Removed *v1.Node event handler 7\\\\nI0227 16:25:40.169791 6706 handler.go:208] Removed *v1.Node event handler 2\\\\nI0227 16:25:40.170077 6706 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0227 16:25:40.170143 6706 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0227 16:25:40.170173 6706 ovnkube.go:599] 
Stopped ovnkube\\\\nI0227 16:25:40.170191 6706 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0227 16:25:40.170291 6706 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\
\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:41Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.248610 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:41Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.250295 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.250366 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.250384 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.250443 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.250468 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:41Z","lastTransitionTime":"2026-02-27T16:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.264943 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:41Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.287068 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:41Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.306962 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:41Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.325710 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:41Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.343434 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:41Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.353019 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.353073 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.353096 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.353127 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.353151 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:41Z","lastTransitionTime":"2026-02-27T16:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.364064 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:41Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.382197 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:41Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.455819 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.455874 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.455892 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.455919 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.455937 4751 setters.go:603] "Node became not 
ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:41Z","lastTransitionTime":"2026-02-27T16:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.558660 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.558719 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.558743 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.558770 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.558794 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:41Z","lastTransitionTime":"2026-02-27T16:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.662062 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.662151 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.662171 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.663619 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.663640 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:41Z","lastTransitionTime":"2026-02-27T16:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.767120 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.767204 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.767224 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.767256 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.767278 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:41Z","lastTransitionTime":"2026-02-27T16:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.871548 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.871613 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.871632 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.871677 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.871697 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:41Z","lastTransitionTime":"2026-02-27T16:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.974835 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.974911 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.974924 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.974944 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:41 crc kubenswrapper[4751]: I0227 16:25:41.974963 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:41Z","lastTransitionTime":"2026-02-27T16:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.078142 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vpxjd_45a3f89b-11cb-4336-962d-c6835c5f758e/ovnkube-controller/1.log" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.078300 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.078558 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.078721 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.078758 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.078802 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:42Z","lastTransitionTime":"2026-02-27T16:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.084237 4751 scope.go:117] "RemoveContainer" containerID="c792566a11b8aee56e4009dc6b61d3f8105241570413f344c0753e65933fa17a" Feb 27 16:25:42 crc kubenswrapper[4751]: E0227 16:25:42.084893 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-vpxjd_openshift-ovn-kubernetes(45a3f89b-11cb-4336-962d-c6835c5f758e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.104633 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:42Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.122203 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:42Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.139313 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:42Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.164961 4751 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae804072caefc03aa58e843a4d0ce899c98b2508b1088adecedd725536509a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:42Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.181380 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.181460 4751 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.181472 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.181495 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.181510 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:42Z","lastTransitionTime":"2026-02-27T16:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.199174 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c792566a11b8aee56e4009dc6b61d3f810524157
0413f344c0753e65933fa17a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c792566a11b8aee56e4009dc6b61d3f8105241570413f344c0753e65933fa17a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:25:40Z\\\",\\\"message\\\":\\\"712973235162149816) with []\\\\nI0227 16:25:40.169606 6706 address_set.go:302] New(aa6fc2dc-fab0-4812-b9da-809058e4dcf7/default-network-controller:EgressIP:egressip-served-pods:v4:default/a8519615025667110816) with []\\\\nI0227 16:25:40.169622 6706 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI0227 16:25:40.169666 6706 factory.go:1336] Added *v1.Node event handler 7\\\\nI0227 16:25:40.169707 6706 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0227 16:25:40.169722 6706 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0227 16:25:40.169746 6706 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI0227 16:25:40.169765 6706 factory.go:656] Stopping watch factory\\\\nI0227 16:25:40.169782 6706 handler.go:208] Removed *v1.Node event handler 7\\\\nI0227 16:25:40.169791 6706 handler.go:208] Removed *v1.Node event handler 2\\\\nI0227 16:25:40.170077 6706 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0227 16:25:40.170143 6706 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0227 16:25:40.170173 6706 ovnkube.go:599] Stopped ovnkube\\\\nI0227 16:25:40.170191 6706 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0227 16:25:40.170291 6706 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vpxjd_openshift-ovn-kubernetes(45a3f89b-11cb-4336-962d-c6835c5f758e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:42Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.216570 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:42Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.239177 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:42Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.260206 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:42Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.281146 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:42Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.284828 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.284890 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.284908 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.284934 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.284953 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:42Z","lastTransitionTime":"2026-02-27T16:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.303006 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:42Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.317123 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:42Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.330086 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:42Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.347685 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:42Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.368044 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:42Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.383927 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:42Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.387568 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.387606 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.387653 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.387675 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.387690 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:42Z","lastTransitionTime":"2026-02-27T16:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.491149 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.491205 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.491222 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.491246 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.491266 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:42Z","lastTransitionTime":"2026-02-27T16:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.797760 4751 scope.go:117] "RemoveContainer" containerID="cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.798366 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:42 crc kubenswrapper[4751]: E0227 16:25:42.798521 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.798630 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:25:42 crc kubenswrapper[4751]: E0227 16:25:42.798705 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.798835 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:42 crc kubenswrapper[4751]: E0227 16:25:42.798978 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.799301 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:25:42 crc kubenswrapper[4751]: E0227 16:25:42.799481 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.799976 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.800053 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.800077 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.800110 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.800133 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:42Z","lastTransitionTime":"2026-02-27T16:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.903844 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.903887 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.903896 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.903908 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:42 crc kubenswrapper[4751]: I0227 16:25:42.903917 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:42Z","lastTransitionTime":"2026-02-27T16:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.007656 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.008475 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.008543 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.008576 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.008595 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:43Z","lastTransitionTime":"2026-02-27T16:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.089063 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/3.log" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.091349 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542"} Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.091634 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.107462 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:43Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.112537 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.112580 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.112594 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.112612 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.112624 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:43Z","lastTransitionTime":"2026-02-27T16:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.123219 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:43Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.140712 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:43Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.155933 4751 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:43Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.174151 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:43Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.194929 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae804072caefc03aa58e843a4d0ce899c98b2508b1088adecedd725536509a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:43Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.220440 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.220486 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:43 crc 
kubenswrapper[4751]: I0227 16:25:43.220503 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.220526 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.220543 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:43Z","lastTransitionTime":"2026-02-27T16:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.235829 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c792566a11b8aee56e4009dc6b61d3f810524157
0413f344c0753e65933fa17a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c792566a11b8aee56e4009dc6b61d3f8105241570413f344c0753e65933fa17a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:25:40Z\\\",\\\"message\\\":\\\"712973235162149816) with []\\\\nI0227 16:25:40.169606 6706 address_set.go:302] New(aa6fc2dc-fab0-4812-b9da-809058e4dcf7/default-network-controller:EgressIP:egressip-served-pods:v4:default/a8519615025667110816) with []\\\\nI0227 16:25:40.169622 6706 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI0227 16:25:40.169666 6706 factory.go:1336] Added *v1.Node event handler 7\\\\nI0227 16:25:40.169707 6706 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0227 16:25:40.169722 6706 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0227 16:25:40.169746 6706 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI0227 16:25:40.169765 6706 factory.go:656] Stopping watch factory\\\\nI0227 16:25:40.169782 6706 handler.go:208] Removed *v1.Node event handler 7\\\\nI0227 16:25:40.169791 6706 handler.go:208] Removed *v1.Node event handler 2\\\\nI0227 16:25:40.170077 6706 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0227 16:25:40.170143 6706 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0227 16:25:40.170173 6706 ovnkube.go:599] Stopped ovnkube\\\\nI0227 16:25:40.170191 6706 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0227 16:25:40.170291 6706 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vpxjd_openshift-ovn-kubernetes(45a3f89b-11cb-4336-962d-c6835c5f758e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:43Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.258318 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:43Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.281332 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:43Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.301665 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:43Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.317811 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:43Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.328029 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.328072 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.328107 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.328127 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.328153 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:43Z","lastTransitionTime":"2026-02-27T16:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.331632 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:43Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.352897 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:43Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.372776 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:43Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:43 crc 
kubenswrapper[4751]: I0227 16:25:43.386053 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:43Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.431166 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.431226 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.431244 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.431270 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.431286 4751 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:43Z","lastTransitionTime":"2026-02-27T16:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.534365 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.534745 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.534875 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.534998 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.535161 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:43Z","lastTransitionTime":"2026-02-27T16:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.638963 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.639210 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.639357 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.639567 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.639793 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:43Z","lastTransitionTime":"2026-02-27T16:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.743847 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.743904 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.743915 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.743932 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.743943 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:43Z","lastTransitionTime":"2026-02-27T16:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.847889 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.847957 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.847975 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.848000 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.848019 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:43Z","lastTransitionTime":"2026-02-27T16:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.950555 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.950603 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.950616 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.950634 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:43 crc kubenswrapper[4751]: I0227 16:25:43.950647 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:43Z","lastTransitionTime":"2026-02-27T16:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.053644 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.053699 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.053717 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.053741 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.053758 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:44Z","lastTransitionTime":"2026-02-27T16:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.156825 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.156867 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.156883 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.156905 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.156923 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:44Z","lastTransitionTime":"2026-02-27T16:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.259905 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.259970 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.259988 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.260015 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.260037 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:44Z","lastTransitionTime":"2026-02-27T16:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.311866 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.311991 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.312029 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.312063 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:25:44 crc kubenswrapper[4751]: E0227 16:25:44.312199 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 27 16:25:44 crc kubenswrapper[4751]: E0227 16:25:44.312221 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 27 16:25:44 crc kubenswrapper[4751]: E0227 16:25:44.312239 4751 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:44 crc kubenswrapper[4751]: E0227 16:25:44.312288 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-27 16:26:00.312272589 +0000 UTC m=+122.459287046 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:44 crc kubenswrapper[4751]: E0227 16:25:44.312681 4751 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 27 16:25:44 crc kubenswrapper[4751]: E0227 16:25:44.312726 4751 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 27 16:25:44 crc kubenswrapper[4751]: E0227 16:25:44.312745 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-27 16:26:00.312731612 +0000 UTC m=+122.459746069 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 27 16:25:44 crc kubenswrapper[4751]: E0227 16:25:44.312956 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:26:00.312920087 +0000 UTC m=+122.459934594 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:25:44 crc kubenswrapper[4751]: E0227 16:25:44.313003 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-27 16:26:00.312985569 +0000 UTC m=+122.460000126 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.363307 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.363388 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.363478 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.363512 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.363538 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:44Z","lastTransitionTime":"2026-02-27T16:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.413302 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.413390 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs\") pod \"network-metrics-daemon-4bnbv\" (UID: \"7da183a7-dcda-4e22-b135-b1ef0d593811\") " pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:44 crc kubenswrapper[4751]: E0227 16:25:44.413584 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 27 16:25:44 crc kubenswrapper[4751]: E0227 16:25:44.413597 4751 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 27 16:25:44 crc kubenswrapper[4751]: E0227 16:25:44.413621 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 27 16:25:44 crc kubenswrapper[4751]: E0227 16:25:44.413643 4751 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:44 crc kubenswrapper[4751]: E0227 16:25:44.413674 4751 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs podName:7da183a7-dcda-4e22-b135-b1ef0d593811 nodeName:}" failed. No retries permitted until 2026-02-27 16:26:00.413653221 +0000 UTC m=+122.560667708 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs") pod "network-metrics-daemon-4bnbv" (UID: "7da183a7-dcda-4e22-b135-b1ef0d593811") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 27 16:25:44 crc kubenswrapper[4751]: E0227 16:25:44.413704 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-27 16:26:00.413685972 +0000 UTC m=+122.560700459 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.472174 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.472249 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.472268 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.472294 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.472319 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:44Z","lastTransitionTime":"2026-02-27T16:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.520536 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.520613 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:25:44 crc kubenswrapper[4751]: E0227 16:25:44.520722 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.520791 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:44 crc kubenswrapper[4751]: E0227 16:25:44.520939 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.520554 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:44 crc kubenswrapper[4751]: E0227 16:25:44.521079 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:25:44 crc kubenswrapper[4751]: E0227 16:25:44.521186 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.575796 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.576012 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.576077 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.576155 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.576224 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:44Z","lastTransitionTime":"2026-02-27T16:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.679646 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.679715 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.679733 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.679759 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.679777 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:44Z","lastTransitionTime":"2026-02-27T16:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.782780 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.782822 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.782832 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.782852 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.782862 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:44Z","lastTransitionTime":"2026-02-27T16:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.885889 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.885944 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.885955 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.886003 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.886017 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:44Z","lastTransitionTime":"2026-02-27T16:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.989124 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.989165 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.989175 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.989191 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:44 crc kubenswrapper[4751]: I0227 16:25:44.989203 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:44Z","lastTransitionTime":"2026-02-27T16:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.092079 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.092129 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.092140 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.092157 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.092169 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:45Z","lastTransitionTime":"2026-02-27T16:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.195200 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.195238 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.195351 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.195373 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.195388 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:45Z","lastTransitionTime":"2026-02-27T16:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.297976 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.298063 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.298083 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.298111 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.298132 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:45Z","lastTransitionTime":"2026-02-27T16:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.401969 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.402053 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.402077 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.402105 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.402128 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:45Z","lastTransitionTime":"2026-02-27T16:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.505843 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.505901 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.505915 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.505933 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.505945 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:45Z","lastTransitionTime":"2026-02-27T16:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.608578 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.608684 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.608705 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.608729 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.608746 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:45Z","lastTransitionTime":"2026-02-27T16:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.712268 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.712325 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.712341 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.712362 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.712379 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:45Z","lastTransitionTime":"2026-02-27T16:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.815919 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.816206 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.816345 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.816517 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.816650 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:45Z","lastTransitionTime":"2026-02-27T16:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.920215 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.920350 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.920376 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.920428 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:45 crc kubenswrapper[4751]: I0227 16:25:45.920447 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:45Z","lastTransitionTime":"2026-02-27T16:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.023913 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.024036 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.024065 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.024142 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.024165 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:46Z","lastTransitionTime":"2026-02-27T16:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.126965 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.127040 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.127062 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.127094 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.127115 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:46Z","lastTransitionTime":"2026-02-27T16:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.230619 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.230661 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.230669 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.230685 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.230696 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:46Z","lastTransitionTime":"2026-02-27T16:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.333225 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.333280 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.333292 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.333310 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.333322 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:46Z","lastTransitionTime":"2026-02-27T16:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.435498 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.435555 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.435572 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.435597 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.435618 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:46Z","lastTransitionTime":"2026-02-27T16:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.519968 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.520036 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.520003 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:25:46 crc kubenswrapper[4751]: E0227 16:25:46.520259 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:25:46 crc kubenswrapper[4751]: E0227 16:25:46.520141 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:25:46 crc kubenswrapper[4751]: E0227 16:25:46.520474 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.520508 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:46 crc kubenswrapper[4751]: E0227 16:25:46.520679 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.538152 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.538206 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.538217 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.538235 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.538275 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:46Z","lastTransitionTime":"2026-02-27T16:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.646497 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.646568 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.646580 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.646599 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.646610 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:46Z","lastTransitionTime":"2026-02-27T16:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.749186 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.749232 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.749245 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.749259 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.749289 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:46Z","lastTransitionTime":"2026-02-27T16:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.853114 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.853181 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.853205 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.853250 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.853274 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:46Z","lastTransitionTime":"2026-02-27T16:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.956396 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.956494 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.956521 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.956551 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:46 crc kubenswrapper[4751]: I0227 16:25:46.956575 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:46Z","lastTransitionTime":"2026-02-27T16:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.060213 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.060305 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.060334 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.060365 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.060490 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:47Z","lastTransitionTime":"2026-02-27T16:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.163652 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.163809 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.163833 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.163866 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.163890 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:47Z","lastTransitionTime":"2026-02-27T16:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.266966 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.267027 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.267045 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.267072 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.267090 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:47Z","lastTransitionTime":"2026-02-27T16:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.370451 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.370526 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.370550 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.370583 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.370603 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:47Z","lastTransitionTime":"2026-02-27T16:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.473986 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.474049 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.474066 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.474091 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.474108 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:47Z","lastTransitionTime":"2026-02-27T16:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.548015 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.577455 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.577540 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.577565 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.577598 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.577616 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:47Z","lastTransitionTime":"2026-02-27T16:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.680323 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.680441 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.680468 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.680497 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.680520 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:47Z","lastTransitionTime":"2026-02-27T16:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.783772 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.783846 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.783867 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.783892 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.783911 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:47Z","lastTransitionTime":"2026-02-27T16:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.887504 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.887570 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.887584 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.887608 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.887623 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:47Z","lastTransitionTime":"2026-02-27T16:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.991602 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.991673 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.991693 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.991726 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:47 crc kubenswrapper[4751]: I0227 16:25:47.991751 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:47Z","lastTransitionTime":"2026-02-27T16:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.094331 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.094377 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.094389 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.094442 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.094455 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:48Z","lastTransitionTime":"2026-02-27T16:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.198196 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.198263 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.198278 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.198296 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.198309 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:48Z","lastTransitionTime":"2026-02-27T16:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.301077 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.301145 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.301168 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.301199 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.301224 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:48Z","lastTransitionTime":"2026-02-27T16:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.403885 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.403962 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.403993 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.404023 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.404047 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:48Z","lastTransitionTime":"2026-02-27T16:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.507864 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.507928 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.507951 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.507978 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.507997 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:48Z","lastTransitionTime":"2026-02-27T16:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.520502 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.520561 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.520660 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.520671 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:48 crc kubenswrapper[4751]: E0227 16:25:48.520647 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:25:48 crc kubenswrapper[4751]: E0227 16:25:48.520862 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:25:48 crc kubenswrapper[4751]: E0227 16:25:48.521180 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:25:48 crc kubenswrapper[4751]: E0227 16:25:48.521317 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.545233 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:48Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.579332 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c09692dc-ede0-4abf-b370-0746a09a3285\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80337bb8baab11860ebf1376e814c50ee29ed4753d9cf943363813eed97ebdb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef6090b6b35f21dfbe6d596d98ace0147a831021d5e879149b2c60e338e46ee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c53c723a2249cddf16f9817dc34608f5742c9b762d9a66be0e93e0b843d47f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702
f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07b1cdbe0a524d5194b02185a86fcff64ddd83a916c9e0dc0fa630e123f054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d16b1942494e44a2797d349c20548a3644b2a3b147c57563a8382a62de218b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\
\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:48Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.596536 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:48Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.612304 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:48Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.614073 4751 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.614113 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.614166 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.614187 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.614202 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:48Z","lastTransitionTime":"2026-02-27T16:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.644020 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c792566a11b8aee56e4009dc6b61d3f810524157
0413f344c0753e65933fa17a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c792566a11b8aee56e4009dc6b61d3f8105241570413f344c0753e65933fa17a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:25:40Z\\\",\\\"message\\\":\\\"712973235162149816) with []\\\\nI0227 16:25:40.169606 6706 address_set.go:302] New(aa6fc2dc-fab0-4812-b9da-809058e4dcf7/default-network-controller:EgressIP:egressip-served-pods:v4:default/a8519615025667110816) with []\\\\nI0227 16:25:40.169622 6706 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI0227 16:25:40.169666 6706 factory.go:1336] Added *v1.Node event handler 7\\\\nI0227 16:25:40.169707 6706 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0227 16:25:40.169722 6706 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0227 16:25:40.169746 6706 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI0227 16:25:40.169765 6706 factory.go:656] Stopping watch factory\\\\nI0227 16:25:40.169782 6706 handler.go:208] Removed *v1.Node event handler 7\\\\nI0227 16:25:40.169791 6706 handler.go:208] Removed *v1.Node event handler 2\\\\nI0227 16:25:40.170077 6706 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0227 16:25:40.170143 6706 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0227 16:25:40.170173 6706 ovnkube.go:599] Stopped ovnkube\\\\nI0227 16:25:40.170191 6706 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0227 16:25:40.170291 6706 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vpxjd_openshift-ovn-kubernetes(45a3f89b-11cb-4336-962d-c6835c5f758e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:48Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.660074 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:48Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.679091 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:48Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.694450 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:48Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.715228 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae804072caefc03aa58e843a4d0ce899c98b2508b1088adecedd725536509a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:48Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.716263 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.716325 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:48 crc 
kubenswrapper[4751]: I0227 16:25:48.716338 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.716355 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.716367 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:48Z","lastTransitionTime":"2026-02-27T16:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.733680 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"n
ame\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:48Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.747281 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:48Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.761870 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:48Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.783673 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:48Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.806978 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:48Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.819274 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.819467 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.819524 4751 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.819558 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.819582 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:48Z","lastTransitionTime":"2026-02-27T16:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.833519 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:48Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.856419 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:48Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.922999 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.923191 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.923274 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.923356 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:48 crc kubenswrapper[4751]: I0227 16:25:48.923468 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:48Z","lastTransitionTime":"2026-02-27T16:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.012872 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.012930 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.012948 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.012978 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.013000 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:49Z","lastTransitionTime":"2026-02-27T16:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:49 crc kubenswrapper[4751]: E0227 16:25:49.036741 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:49Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.041760 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.041806 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.041817 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.041835 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.042072 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:49Z","lastTransitionTime":"2026-02-27T16:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:49 crc kubenswrapper[4751]: E0227 16:25:49.057444 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:49Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.062343 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.062544 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.062655 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.062752 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.062838 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:49Z","lastTransitionTime":"2026-02-27T16:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:49 crc kubenswrapper[4751]: E0227 16:25:49.089900 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:49Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.093783 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.093847 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.093866 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.093892 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.093910 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:49Z","lastTransitionTime":"2026-02-27T16:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:49 crc kubenswrapper[4751]: E0227 16:25:49.109215 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:49Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.114450 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.114599 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.114820 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.115057 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.115253 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:49Z","lastTransitionTime":"2026-02-27T16:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:49 crc kubenswrapper[4751]: E0227 16:25:49.131819 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:49Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:49 crc kubenswrapper[4751]: E0227 16:25:49.132147 4751 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.134165 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.134383 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.134610 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.134758 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.134882 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:49Z","lastTransitionTime":"2026-02-27T16:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.237667 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.237730 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.237747 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.237773 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.237790 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:49Z","lastTransitionTime":"2026-02-27T16:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.340857 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.340919 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.340938 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.340963 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.340982 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:49Z","lastTransitionTime":"2026-02-27T16:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.444545 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.445152 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.445171 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.445198 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.445215 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:49Z","lastTransitionTime":"2026-02-27T16:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.548038 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.548107 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.548138 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.548162 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.548181 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:49Z","lastTransitionTime":"2026-02-27T16:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.651657 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.651714 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.651731 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.651754 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.651773 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:49Z","lastTransitionTime":"2026-02-27T16:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.755365 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.755425 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.755435 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.755450 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.755459 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:49Z","lastTransitionTime":"2026-02-27T16:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.858520 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.858557 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.858566 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.858580 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.858589 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:49Z","lastTransitionTime":"2026-02-27T16:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.961697 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.961765 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.961782 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.961807 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:49 crc kubenswrapper[4751]: I0227 16:25:49.961826 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:49Z","lastTransitionTime":"2026-02-27T16:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.064940 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.064986 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.064998 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.065016 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.065028 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:50Z","lastTransitionTime":"2026-02-27T16:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.167646 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.167742 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.167766 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.167798 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.167824 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:50Z","lastTransitionTime":"2026-02-27T16:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.271331 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.271398 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.271468 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.271494 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.271517 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:50Z","lastTransitionTime":"2026-02-27T16:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.374978 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.375043 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.375060 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.375085 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.375103 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:50Z","lastTransitionTime":"2026-02-27T16:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.478292 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.478383 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.478463 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.478489 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.478506 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:50Z","lastTransitionTime":"2026-02-27T16:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.520715 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.520786 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.520830 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:25:50 crc kubenswrapper[4751]: E0227 16:25:50.520944 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.520990 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:50 crc kubenswrapper[4751]: E0227 16:25:50.521100 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:25:50 crc kubenswrapper[4751]: E0227 16:25:50.521193 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:25:50 crc kubenswrapper[4751]: E0227 16:25:50.521307 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.581742 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.581821 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.581845 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.581873 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.581896 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:50Z","lastTransitionTime":"2026-02-27T16:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.685338 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.685435 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.685462 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.685491 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.685513 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:50Z","lastTransitionTime":"2026-02-27T16:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.787547 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.787616 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.787634 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.787669 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.787689 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:50Z","lastTransitionTime":"2026-02-27T16:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.891159 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.891222 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.891262 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.891289 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.891307 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:50Z","lastTransitionTime":"2026-02-27T16:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.994489 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.994545 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.994564 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.994585 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:50 crc kubenswrapper[4751]: I0227 16:25:50.994601 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:50Z","lastTransitionTime":"2026-02-27T16:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.098100 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.098158 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.098176 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.098204 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.098221 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:51Z","lastTransitionTime":"2026-02-27T16:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.201602 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.201674 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.201698 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.201724 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.201745 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:51Z","lastTransitionTime":"2026-02-27T16:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.304171 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.304233 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.304251 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.304274 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.304291 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:51Z","lastTransitionTime":"2026-02-27T16:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.406434 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.406462 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.406471 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.406485 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.406495 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:51Z","lastTransitionTime":"2026-02-27T16:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.508711 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.508776 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.508793 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.508817 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.508847 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:51Z","lastTransitionTime":"2026-02-27T16:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.612156 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.612225 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.612244 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.612271 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.612288 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:51Z","lastTransitionTime":"2026-02-27T16:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.715182 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.715540 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.715622 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.715694 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.715765 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:51Z","lastTransitionTime":"2026-02-27T16:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.819998 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.820764 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.820941 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.821109 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.821256 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:51Z","lastTransitionTime":"2026-02-27T16:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.925331 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.925551 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.925571 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.925598 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:51 crc kubenswrapper[4751]: I0227 16:25:51.925619 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:51Z","lastTransitionTime":"2026-02-27T16:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.029701 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.029774 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.029788 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.029807 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.029821 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:52Z","lastTransitionTime":"2026-02-27T16:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.132786 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.132944 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.132967 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.133034 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.133059 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:52Z","lastTransitionTime":"2026-02-27T16:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.236615 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.236661 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.236673 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.236686 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.236696 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:52Z","lastTransitionTime":"2026-02-27T16:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.340550 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.340611 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.340640 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.340668 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.340688 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:52Z","lastTransitionTime":"2026-02-27T16:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.443652 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.443727 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.443749 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.443782 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.443803 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:52Z","lastTransitionTime":"2026-02-27T16:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.520224 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.520279 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.520310 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:25:52 crc kubenswrapper[4751]: E0227 16:25:52.520559 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.520649 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:52 crc kubenswrapper[4751]: E0227 16:25:52.520910 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:25:52 crc kubenswrapper[4751]: E0227 16:25:52.521015 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:25:52 crc kubenswrapper[4751]: E0227 16:25:52.521074 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.546239 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.546375 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.546441 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.546474 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.546498 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:52Z","lastTransitionTime":"2026-02-27T16:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.649030 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.649102 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.649120 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.649146 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.649164 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:52Z","lastTransitionTime":"2026-02-27T16:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.752644 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.752720 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.752738 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.752764 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.752781 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:52Z","lastTransitionTime":"2026-02-27T16:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.856045 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.856127 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.856160 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.856192 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.856216 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:52Z","lastTransitionTime":"2026-02-27T16:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.958787 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.958961 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.958983 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.959007 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:52 crc kubenswrapper[4751]: I0227 16:25:52.959029 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:52Z","lastTransitionTime":"2026-02-27T16:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.062080 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.062147 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.062163 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.062190 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.062206 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:53Z","lastTransitionTime":"2026-02-27T16:25:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.165730 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.165799 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.165828 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.165858 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.165922 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:53Z","lastTransitionTime":"2026-02-27T16:25:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.268653 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.268714 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.268732 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.268850 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.268891 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:53Z","lastTransitionTime":"2026-02-27T16:25:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.371731 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.371821 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.371836 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.371853 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.371865 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:53Z","lastTransitionTime":"2026-02-27T16:25:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.475285 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.475349 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.475366 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.475390 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.475454 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:53Z","lastTransitionTime":"2026-02-27T16:25:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.520816 4751 scope.go:117] "RemoveContainer" containerID="c792566a11b8aee56e4009dc6b61d3f8105241570413f344c0753e65933fa17a" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.578602 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.578660 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.578676 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.578700 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.578716 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:53Z","lastTransitionTime":"2026-02-27T16:25:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.681590 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.681651 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.681673 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.681702 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.681725 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:53Z","lastTransitionTime":"2026-02-27T16:25:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.783767 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.783802 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.783810 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.783822 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.783831 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:53Z","lastTransitionTime":"2026-02-27T16:25:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.885492 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.885535 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.885546 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.885564 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.885584 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:53Z","lastTransitionTime":"2026-02-27T16:25:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.988320 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.988366 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.988378 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.988422 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:53 crc kubenswrapper[4751]: I0227 16:25:53.988437 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:53Z","lastTransitionTime":"2026-02-27T16:25:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.091234 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.091282 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.091292 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.091307 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.091322 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:54Z","lastTransitionTime":"2026-02-27T16:25:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.129877 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vpxjd_45a3f89b-11cb-4336-962d-c6835c5f758e/ovnkube-controller/1.log" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.132823 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerStarted","Data":"674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d"} Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.133257 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.145587 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:54Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.165339 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:54Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.180060 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:54Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.193922 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.193964 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.193973 4751 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.193988 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.193997 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:54Z","lastTransitionTime":"2026-02-27T16:25:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.193988 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:54Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.204711 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:54Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.216458 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:54Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.224961 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:54Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.235105 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:54Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.255512 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c09692dc-ede0-4abf-b370-0746a09a3285\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80337bb8baab11860ebf1376e814c50ee29ed4753d9cf943363813eed97ebdb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef6090b6b35f21dfbe6d596d98ace0147a831021d5e879149b2c60e338e46ee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c53c723a2249cddf16f9817dc34608f5742c9b762d9a66be0e93e0b843d47f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07b1cdbe0a524d5194b02185a86fcff64ddd8
3a916c9e0dc0fa630e123f054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d16b1942494e44a2797d349c20548a3644b2a3b147c57563a8382a62de218b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:54Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.266554 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\
\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:54Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.277433 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnv
nb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:54Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.291764 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"ru
nning\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:54Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.295472 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.295512 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.295522 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.295538 4751 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.295548 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:54Z","lastTransitionTime":"2026-02-27T16:25:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.305113 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:54Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.319037 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae804072caefc03aa58e843a4d0ce899c98b2508b1088adecedd725536509a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:54Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.335063 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://674f19e5adbd0664e39863cd818390c01ee4518e
6452f8c668fe0de5d2c9e43d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c792566a11b8aee56e4009dc6b61d3f8105241570413f344c0753e65933fa17a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:25:40Z\\\",\\\"message\\\":\\\"712973235162149816) with []\\\\nI0227 16:25:40.169606 6706 address_set.go:302] New(aa6fc2dc-fab0-4812-b9da-809058e4dcf7/default-network-controller:EgressIP:egressip-served-pods:v4:default/a8519615025667110816) with []\\\\nI0227 16:25:40.169622 6706 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI0227 16:25:40.169666 6706 factory.go:1336] Added *v1.Node event handler 7\\\\nI0227 16:25:40.169707 6706 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0227 16:25:40.169722 6706 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0227 16:25:40.169746 6706 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI0227 16:25:40.169765 6706 factory.go:656] Stopping watch factory\\\\nI0227 16:25:40.169782 6706 handler.go:208] Removed *v1.Node event handler 7\\\\nI0227 16:25:40.169791 6706 handler.go:208] Removed *v1.Node event handler 2\\\\nI0227 16:25:40.170077 6706 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0227 16:25:40.170143 6706 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0227 16:25:40.170173 6706 ovnkube.go:599] Stopped ovnkube\\\\nI0227 16:25:40.170191 6706 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0227 16:25:40.170291 6706 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:54Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.347618 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:54Z is after 2025-08-24T17:21:41Z" Feb 27 
16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.398200 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.398232 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.398244 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.398259 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.398271 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:54Z","lastTransitionTime":"2026-02-27T16:25:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.501253 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.501294 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.501304 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.501320 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.501331 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:54Z","lastTransitionTime":"2026-02-27T16:25:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.520844 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:25:54 crc kubenswrapper[4751]: E0227 16:25:54.521071 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.521190 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:54 crc kubenswrapper[4751]: E0227 16:25:54.521255 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.521307 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:25:54 crc kubenswrapper[4751]: E0227 16:25:54.521361 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.521692 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:54 crc kubenswrapper[4751]: E0227 16:25:54.521942 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.604029 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.604070 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.604079 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.604094 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.604105 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:54Z","lastTransitionTime":"2026-02-27T16:25:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.707750 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.707851 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.707870 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.707901 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.707919 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:54Z","lastTransitionTime":"2026-02-27T16:25:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.810922 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.810989 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.811007 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.811033 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.811052 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:54Z","lastTransitionTime":"2026-02-27T16:25:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.918074 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.918181 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.918202 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.918233 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:54 crc kubenswrapper[4751]: I0227 16:25:54.918509 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:54Z","lastTransitionTime":"2026-02-27T16:25:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.021952 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.022033 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.022052 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.022075 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.022092 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:55Z","lastTransitionTime":"2026-02-27T16:25:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.126142 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.126218 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.126246 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.126279 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.126301 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:55Z","lastTransitionTime":"2026-02-27T16:25:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.139897 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vpxjd_45a3f89b-11cb-4336-962d-c6835c5f758e/ovnkube-controller/2.log" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.141056 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vpxjd_45a3f89b-11cb-4336-962d-c6835c5f758e/ovnkube-controller/1.log" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.145323 4751 generic.go:334] "Generic (PLEG): container finished" podID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerID="674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d" exitCode=1 Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.145379 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerDied","Data":"674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d"} Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.145481 4751 scope.go:117] "RemoveContainer" containerID="c792566a11b8aee56e4009dc6b61d3f8105241570413f344c0753e65933fa17a" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.147974 4751 scope.go:117] "RemoveContainer" containerID="674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d" Feb 27 16:25:55 crc kubenswrapper[4751]: E0227 16:25:55.152696 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-vpxjd_openshift-ovn-kubernetes(45a3f89b-11cb-4336-962d-c6835c5f758e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.168245 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:55Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.188654 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:55Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.204817 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:55Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.224612 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:55Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.229778 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.229822 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.229831 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.229849 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.229860 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:55Z","lastTransitionTime":"2026-02-27T16:25:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.240340 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:55Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.255669 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:55Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.306257 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:55Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.326216 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:55Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.332460 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.332496 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.332508 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.332524 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.332537 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:55Z","lastTransitionTime":"2026-02-27T16:25:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.337865 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:55Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.352938 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:55Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.382383 4751 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c09692dc-ede0-4abf-b370-0746a09a3285\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80337bb8baab11860ebf1376e814c50ee29ed4753d9cf943363813eed97ebdb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef6090b6b35f21dfbe6d596d98ace0147a831021d5e879149b2c60e338e46ee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c53c723a2249cddf16f9817dc34608f5742c9b762d9a66be0e93e0b843d47f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07b1cdbe0a524d5194b02185a86fcff64ddd83a916c9e0dc0fa630e123f054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d16b1942494e44a2797d349c20548a3644b2a3b147c57563a8382a62de218b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:55Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.398693 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:55Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.413789 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:55Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.429739 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae804072caefc03aa58e843a4d0ce899c98b2508b1088adecedd725536509a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:55Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.434975 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.435071 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:55 crc 
kubenswrapper[4751]: I0227 16:25:55.435129 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.435194 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.435265 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:55Z","lastTransitionTime":"2026-02-27T16:25:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.455951 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://674f19e5adbd0664e39863cd818390c01ee4518e
6452f8c668fe0de5d2c9e43d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c792566a11b8aee56e4009dc6b61d3f8105241570413f344c0753e65933fa17a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:25:40Z\\\",\\\"message\\\":\\\"712973235162149816) with []\\\\nI0227 16:25:40.169606 6706 address_set.go:302] New(aa6fc2dc-fab0-4812-b9da-809058e4dcf7/default-network-controller:EgressIP:egressip-served-pods:v4:default/a8519615025667110816) with []\\\\nI0227 16:25:40.169622 6706 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI0227 16:25:40.169666 6706 factory.go:1336] Added *v1.Node event handler 7\\\\nI0227 16:25:40.169707 6706 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0227 16:25:40.169722 6706 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0227 16:25:40.169746 6706 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI0227 16:25:40.169765 6706 factory.go:656] Stopping watch factory\\\\nI0227 16:25:40.169782 6706 handler.go:208] Removed *v1.Node event handler 7\\\\nI0227 16:25:40.169791 6706 handler.go:208] Removed *v1.Node event handler 2\\\\nI0227 16:25:40.170077 6706 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0227 16:25:40.170143 6706 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0227 16:25:40.170173 6706 ovnkube.go:599] Stopped ovnkube\\\\nI0227 16:25:40.170191 6706 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0227 16:25:40.170291 6706 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:25:54Z\\\",\\\"message\\\":\\\"ck:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0227 16:25:54.377911 6908 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0227 16:25:54.377922 6908 obj_retry.go:303] Retry object setup: *v1.Pod openshift-etcd/etcd-crc\\\\nI0227 16:25:54.377930 6908 obj_retry.go:365] Adding new object: *v1.Pod openshift-etcd/etcd-crc\\\\nI0227 16:25:54.377931 6908 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0227 16:25:54.377936 6908 ovn.go:134] Ensuring zone local for Pod openshift-etcd/etcd-crc in node crc\\\\nI0227 16:25:54.377895 6908 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld in node crc\\\\nI0227 16:25:54.377943 6908 obj_retry.go:386] Retry successful for *v1.Pod openshift-etcd/etcd-crc after 0 failed attempt(s)\\\\nI0227 16:25:54.377947 6908 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nF0227 16:25:54.377951 6908 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not 
add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has sto\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerI
D\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:55Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.467538 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:55Z is after 2025-08-24T17:21:41Z" Feb 27 
16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.538260 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.538322 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.538344 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.538366 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.538384 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:55Z","lastTransitionTime":"2026-02-27T16:25:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.641656 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.642025 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.642157 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.642287 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.642435 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:55Z","lastTransitionTime":"2026-02-27T16:25:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.745123 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.745505 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.745682 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.745882 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.746070 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:55Z","lastTransitionTime":"2026-02-27T16:25:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.849232 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.849301 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.849324 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.849354 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.849376 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:55Z","lastTransitionTime":"2026-02-27T16:25:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.952868 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.952937 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.952962 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.952991 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:55 crc kubenswrapper[4751]: I0227 16:25:55.953013 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:55Z","lastTransitionTime":"2026-02-27T16:25:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.056206 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.056270 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.056293 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.056322 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.056342 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:56Z","lastTransitionTime":"2026-02-27T16:25:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.151799 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vpxjd_45a3f89b-11cb-4336-962d-c6835c5f758e/ovnkube-controller/2.log" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.157219 4751 scope.go:117] "RemoveContainer" containerID="674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d" Feb 27 16:25:56 crc kubenswrapper[4751]: E0227 16:25:56.157739 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-vpxjd_openshift-ovn-kubernetes(45a3f89b-11cb-4336-962d-c6835c5f758e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.158539 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.158613 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.158637 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.158669 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.158691 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:56Z","lastTransitionTime":"2026-02-27T16:25:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.184293 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:56Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.206197 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:56Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.228753 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae804072caefc03aa58e843a4d0ce899c98b2508b1088adecedd725536509a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:56Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.255958 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:25:54Z\\\",\\\"message\\\":\\\"ck:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0227 16:25:54.377911 6908 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0227 16:25:54.377922 6908 obj_retry.go:303] Retry object setup: *v1.Pod openshift-etcd/etcd-crc\\\\nI0227 16:25:54.377930 6908 obj_retry.go:365] Adding new object: *v1.Pod openshift-etcd/etcd-crc\\\\nI0227 16:25:54.377931 6908 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0227 16:25:54.377936 6908 ovn.go:134] Ensuring zone local for Pod openshift-etcd/etcd-crc in node crc\\\\nI0227 16:25:54.377895 6908 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld in node crc\\\\nI0227 16:25:54.377943 6908 obj_retry.go:386] Retry successful for *v1.Pod openshift-etcd/etcd-crc after 0 failed attempt(s)\\\\nI0227 16:25:54.377947 6908 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nF0227 16:25:54.377951 6908 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has sto\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vpxjd_openshift-ovn-kubernetes(45a3f89b-11cb-4336-962d-c6835c5f758e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:56Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.261735 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.261793 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.261809 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.261834 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.261852 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:56Z","lastTransitionTime":"2026-02-27T16:25:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.271304 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:56Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.291506 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:56Z is after 
2025-08-24T17:21:41Z" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.311638 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:56Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.330641 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:56Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.350913 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:56Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.364569 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.364632 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.364650 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.364674 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.364691 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:56Z","lastTransitionTime":"2026-02-27T16:25:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.365069 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:56Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.377651 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:56Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.397101 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:56Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.411677 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:56Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.423815 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:56Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.436897 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:56Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.456453 4751 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c09692dc-ede0-4abf-b370-0746a09a3285\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80337bb8baab11860ebf1376e814c50ee29ed4753d9cf943363813eed97ebdb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef6090b6b35f21dfbe6d596d98ace0147a831021d5e879149b2c60e338e46ee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c53c723a2249cddf16f9817dc34608f5742c9b762d9a66be0e93e0b843d47f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07b1cdbe0a524d5194b02185a86fcff64ddd83a916c9e0dc0fa630e123f054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d16b1942494e44a2797d349c20548a3644b2a3b147c57563a8382a62de218b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:56Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.467853 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.467923 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.467940 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.467971 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.467988 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:56Z","lastTransitionTime":"2026-02-27T16:25:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.520512 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.520555 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.520609 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.520625 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:25:56 crc kubenswrapper[4751]: E0227 16:25:56.520698 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:25:56 crc kubenswrapper[4751]: E0227 16:25:56.520819 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:25:56 crc kubenswrapper[4751]: E0227 16:25:56.520950 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:25:56 crc kubenswrapper[4751]: E0227 16:25:56.521076 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.571271 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.571353 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.571370 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.571397 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.571450 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:56Z","lastTransitionTime":"2026-02-27T16:25:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.675022 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.675074 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.675091 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.675113 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.675130 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:56Z","lastTransitionTime":"2026-02-27T16:25:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.777449 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.777519 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.777537 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.777562 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.777581 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:56Z","lastTransitionTime":"2026-02-27T16:25:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.879987 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.880054 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.880072 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.880096 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.880116 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:56Z","lastTransitionTime":"2026-02-27T16:25:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.983428 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.983892 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.984031 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.984173 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:56 crc kubenswrapper[4751]: I0227 16:25:56.984308 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:56Z","lastTransitionTime":"2026-02-27T16:25:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.088064 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.088118 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.088134 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.088157 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.088174 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:57Z","lastTransitionTime":"2026-02-27T16:25:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.191235 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.191319 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.191337 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.191368 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.191388 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:57Z","lastTransitionTime":"2026-02-27T16:25:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.294856 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.294929 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.294946 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.294974 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.294996 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:57Z","lastTransitionTime":"2026-02-27T16:25:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.398095 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.398175 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.398196 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.398220 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.398236 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:57Z","lastTransitionTime":"2026-02-27T16:25:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.500922 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.500998 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.501017 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.501041 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.501058 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:57Z","lastTransitionTime":"2026-02-27T16:25:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.533217 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.604467 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.604536 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.604552 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.604578 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.604596 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:57Z","lastTransitionTime":"2026-02-27T16:25:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.707911 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.707974 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.708166 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.708189 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.708205 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:57Z","lastTransitionTime":"2026-02-27T16:25:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.810743 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.810785 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.810796 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.810813 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.810825 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:57Z","lastTransitionTime":"2026-02-27T16:25:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.914048 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.914111 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.914130 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.914155 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:57 crc kubenswrapper[4751]: I0227 16:25:57.914170 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:57Z","lastTransitionTime":"2026-02-27T16:25:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.016701 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.016760 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.016777 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.016802 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.016822 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:58Z","lastTransitionTime":"2026-02-27T16:25:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.119504 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.119571 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.119592 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.119617 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.119635 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:58Z","lastTransitionTime":"2026-02-27T16:25:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.222383 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.222483 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.222506 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.222534 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.222556 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:58Z","lastTransitionTime":"2026-02-27T16:25:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.325842 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.325910 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.325928 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.325951 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.325970 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:58Z","lastTransitionTime":"2026-02-27T16:25:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.428659 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.428730 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.428749 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.428774 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.428792 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:58Z","lastTransitionTime":"2026-02-27T16:25:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.520707 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.520799 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:25:58 crc kubenswrapper[4751]: E0227 16:25:58.521551 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.520846 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:25:58 crc kubenswrapper[4751]: E0227 16:25:58.521780 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.521843 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:25:58 crc kubenswrapper[4751]: E0227 16:25:58.522054 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:25:58 crc kubenswrapper[4751]: E0227 16:25:58.522181 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:25:58 crc kubenswrapper[4751]: E0227 16:25:58.529242 4751 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.544497 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:58Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.559698 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:58Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.580187 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:58Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.600333 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:58Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.620240 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:58Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:58 crc kubenswrapper[4751]: E0227 16:25:58.622097 4751 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.636106 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:58Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.651550 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:58Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.669182 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:58Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.701642 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c09692dc-ede0-4abf-b370-0746a09a3285\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80337bb8baab11860ebf1376e814c50ee29ed4753d9cf943363813eed97ebdb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef6090b6b35f21dfbe6d596d98ace0147a831021d5e879149b2c60e338e46ee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c53c723a2249cddf16f9817dc34608f5742c9b762d9a66be0e93e0b843d47f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07b1cdbe0a524d5194b02185a86fcff64ddd8
3a916c9e0dc0fa630e123f054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d16b1942494e44a2797d349c20548a3644b2a3b147c57563a8382a62de218b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:58Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.717934 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\
\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:58Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.737688 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnv
nb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:58Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.754715 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3059be4d-025f-48c7-8d37-edc542161c80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4372e903bbffcc7bae3515e3443a8c3b00a773bb51373ec2725cbae4bc309bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc4cc06a7fbfc76c4cc44e86e3a81212db634cf9727ae28b857150dc89f104e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6392f00602ed84ba23d03f7e082004ef85e622f340c36812094984606cfed32c\\\",\\\"im
age\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://275d6054505cb9a29221cf933ffc2b436856047e0a74a8068dfad90cb1006a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://275d6054505cb9a29221cf933ffc2b436856047e0a74a8068dfad90cb1006a57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:58Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.776545 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:58Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.797492 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:58Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.818678 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae804072caefc03aa58e843a4d0ce899c98b2508b1088adecedd725536509a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:58Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.839966 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:25:54Z\\\",\\\"message\\\":\\\"ck:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0227 16:25:54.377911 6908 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0227 16:25:54.377922 6908 obj_retry.go:303] Retry object setup: *v1.Pod openshift-etcd/etcd-crc\\\\nI0227 16:25:54.377930 6908 obj_retry.go:365] Adding new object: *v1.Pod openshift-etcd/etcd-crc\\\\nI0227 16:25:54.377931 6908 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0227 16:25:54.377936 6908 ovn.go:134] Ensuring zone local for Pod openshift-etcd/etcd-crc in node crc\\\\nI0227 16:25:54.377895 6908 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld in node crc\\\\nI0227 16:25:54.377943 6908 obj_retry.go:386] Retry successful for *v1.Pod openshift-etcd/etcd-crc after 0 failed attempt(s)\\\\nI0227 16:25:54.377947 6908 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nF0227 16:25:54.377951 6908 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has sto\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vpxjd_openshift-ovn-kubernetes(45a3f89b-11cb-4336-962d-c6835c5f758e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:58Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:58 crc kubenswrapper[4751]: I0227 16:25:58.858111 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:58Z is after 2025-08-24T17:21:41Z" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.278491 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.278889 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.279126 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.279324 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.279574 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:59Z","lastTransitionTime":"2026-02-27T16:25:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:25:59 crc kubenswrapper[4751]: E0227 16:25:59.299940 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:59Z is after 
2025-08-24T17:21:41Z" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.304614 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.304851 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.305027 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.305211 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.305383 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:59Z","lastTransitionTime":"2026-02-27T16:25:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:59 crc kubenswrapper[4751]: E0227 16:25:59.327211 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:59Z is after 
2025-08-24T17:21:41Z" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.332272 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.332340 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.332363 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.332393 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.332454 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:59Z","lastTransitionTime":"2026-02-27T16:25:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:59 crc kubenswrapper[4751]: E0227 16:25:59.353708 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:59Z is after 
2025-08-24T17:21:41Z" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.358786 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.358845 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.358864 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.358888 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.358906 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:59Z","lastTransitionTime":"2026-02-27T16:25:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:59 crc kubenswrapper[4751]: E0227 16:25:59.381005 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:59Z is after 
2025-08-24T17:21:41Z" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.386752 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.386814 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.386831 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.386854 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:25:59 crc kubenswrapper[4751]: I0227 16:25:59.386873 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:25:59Z","lastTransitionTime":"2026-02-27T16:25:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:25:59 crc kubenswrapper[4751]: E0227 16:25:59.407255 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:25:59Z is after 
2025-08-24T17:21:41Z" Feb 27 16:25:59 crc kubenswrapper[4751]: E0227 16:25:59.407637 4751 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 27 16:26:00 crc kubenswrapper[4751]: I0227 16:26:00.390140 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:26:00 crc kubenswrapper[4751]: I0227 16:26:00.390312 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:00 crc kubenswrapper[4751]: I0227 16:26:00.390441 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:00 crc kubenswrapper[4751]: I0227 16:26:00.390520 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:00 crc kubenswrapper[4751]: E0227 16:26:00.390705 4751 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 27 16:26:00 crc kubenswrapper[4751]: E0227 16:26:00.390793 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-27 16:26:32.390765836 +0000 UTC m=+154.537780323 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 27 16:26:00 crc kubenswrapper[4751]: E0227 16:26:00.390897 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:26:32.390881039 +0000 UTC m=+154.537895526 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:26:00 crc kubenswrapper[4751]: E0227 16:26:00.391012 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 27 16:26:00 crc kubenswrapper[4751]: E0227 16:26:00.391041 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 27 16:26:00 crc kubenswrapper[4751]: E0227 16:26:00.391063 4751 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:26:00 crc kubenswrapper[4751]: E0227 16:26:00.391115 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-27 16:26:32.391098455 +0000 UTC m=+154.538112952 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:26:00 crc kubenswrapper[4751]: E0227 16:26:00.391182 4751 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 27 16:26:00 crc kubenswrapper[4751]: E0227 16:26:00.391227 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-27 16:26:32.391213408 +0000 UTC m=+154.538227895 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 27 16:26:00 crc kubenswrapper[4751]: I0227 16:26:00.491745 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs\") pod \"network-metrics-daemon-4bnbv\" (UID: \"7da183a7-dcda-4e22-b135-b1ef0d593811\") " pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:00 crc kubenswrapper[4751]: I0227 16:26:00.491818 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:00 crc kubenswrapper[4751]: E0227 16:26:00.491939 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 27 16:26:00 crc kubenswrapper[4751]: E0227 16:26:00.491956 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 27 16:26:00 crc kubenswrapper[4751]: E0227 16:26:00.491969 4751 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:26:00 crc kubenswrapper[4751]: E0227 16:26:00.492024 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-27 16:26:32.492008174 +0000 UTC m=+154.639022621 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:26:00 crc kubenswrapper[4751]: E0227 16:26:00.492078 4751 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 27 16:26:00 crc kubenswrapper[4751]: E0227 16:26:00.492486 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs podName:7da183a7-dcda-4e22-b135-b1ef0d593811 nodeName:}" failed. No retries permitted until 2026-02-27 16:26:32.492093886 +0000 UTC m=+154.639108333 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs") pod "network-metrics-daemon-4bnbv" (UID: "7da183a7-dcda-4e22-b135-b1ef0d593811") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 27 16:26:00 crc kubenswrapper[4751]: I0227 16:26:00.519933 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:00 crc kubenswrapper[4751]: I0227 16:26:00.520027 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:00 crc kubenswrapper[4751]: I0227 16:26:00.520099 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:00 crc kubenswrapper[4751]: I0227 16:26:00.520154 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:00 crc kubenswrapper[4751]: E0227 16:26:00.520046 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:00 crc kubenswrapper[4751]: E0227 16:26:00.521378 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:00 crc kubenswrapper[4751]: E0227 16:26:00.521190 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:00 crc kubenswrapper[4751]: E0227 16:26:00.521750 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:01 crc kubenswrapper[4751]: I0227 16:26:01.841294 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:26:01 crc kubenswrapper[4751]: I0227 16:26:01.874234 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c09692dc-ede0-4abf-b370-0746a09a3285\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80337bb8baab11860ebf1376e814c50ee29ed4753d9cf943363813eed97ebdb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef6090b6b35f21dfbe6d596d98ace0147a831021d5e879149b2c60e338e46ee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c53c723a2249cddf16f9817dc34608f5742c9b762d9a66be0e93e0b843d47f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e77
9036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07b1cdbe0a524d5194b02185a86fcff64ddd83a916c9e0dc0fa630e123f054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d16b1942494e44a2797d349c20548a3644b2a3b147c57563a8382a62de218b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\
"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:01Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:01 crc kubenswrapper[4751]: I0227 16:26:01.889605 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:01Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:01 crc kubenswrapper[4751]: I0227 16:26:01.906283 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:01Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:01 crc kubenswrapper[4751]: I0227 16:26:01.925512 4751 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:25:54Z\\\",\\\"message\\\":\\\"ck:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0227 16:25:54.377911 6908 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0227 16:25:54.377922 6908 obj_retry.go:303] Retry object setup: *v1.Pod openshift-etcd/etcd-crc\\\\nI0227 16:25:54.377930 6908 obj_retry.go:365] Adding new object: *v1.Pod openshift-etcd/etcd-crc\\\\nI0227 16:25:54.377931 6908 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0227 16:25:54.377936 6908 ovn.go:134] Ensuring zone local for Pod openshift-etcd/etcd-crc in node crc\\\\nI0227 16:25:54.377895 6908 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld in node crc\\\\nI0227 16:25:54.377943 6908 obj_retry.go:386] Retry successful for *v1.Pod openshift-etcd/etcd-crc after 0 failed attempt(s)\\\\nI0227 16:25:54.377947 6908 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nF0227 16:25:54.377951 6908 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has sto\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vpxjd_openshift-ovn-kubernetes(45a3f89b-11cb-4336-962d-c6835c5f758e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:01Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:01 crc kubenswrapper[4751]: I0227 16:26:01.940239 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:01Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:01 crc kubenswrapper[4751]: I0227 16:26:01.958002 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3059be4d-025f-48c7-8d37-edc542161c80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4372e903bbffcc7bae3515e3443a8c3b00a773bb51373ec2725cbae4bc309bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc4cc06a7fbfc76c4cc44e86e3a81212db634cf9727ae28b857150dc89f104e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6392f00602ed84ba23d03f7e082004ef85e622f340c36812094984606cfed32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://275d6054505cb9a29221cf933ffc2b436856047e0a74a8068dfad90cb1006a57\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://275d6054505cb9a29221cf933ffc2b436856047e0a74a8068dfad90cb1006a57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:01Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:01 crc kubenswrapper[4751]: I0227 16:26:01.978738 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operato
r@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure 
cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":4,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:01Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:01 crc kubenswrapper[4751]: I0227 16:26:01.995437 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:01Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:02 crc kubenswrapper[4751]: I0227 16:26:02.015700 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae804072caefc03aa58e843a4d0ce899c98b2508b1088adecedd725536509a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:02Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:02 crc kubenswrapper[4751]: I0227 16:26:02.034810 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:02Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:02 crc kubenswrapper[4751]: I0227 16:26:02.050515 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:02Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:02 crc 
kubenswrapper[4751]: I0227 16:26:02.064357 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:02Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:02 crc kubenswrapper[4751]: I0227 16:26:02.086782 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:02Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:02 crc kubenswrapper[4751]: I0227 16:26:02.106024 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:02Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:02 crc kubenswrapper[4751]: I0227 16:26:02.125161 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:02Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:02 crc kubenswrapper[4751]: I0227 16:26:02.146308 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:02Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:02 crc kubenswrapper[4751]: I0227 16:26:02.164099 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:02Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:02 crc kubenswrapper[4751]: I0227 16:26:02.519964 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:02 crc kubenswrapper[4751]: I0227 16:26:02.520027 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:02 crc kubenswrapper[4751]: I0227 16:26:02.520093 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:02 crc kubenswrapper[4751]: E0227 16:26:02.520252 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:02 crc kubenswrapper[4751]: I0227 16:26:02.520296 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:02 crc kubenswrapper[4751]: E0227 16:26:02.520536 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:02 crc kubenswrapper[4751]: E0227 16:26:02.520663 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:02 crc kubenswrapper[4751]: E0227 16:26:02.520704 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:03 crc kubenswrapper[4751]: E0227 16:26:03.624069 4751 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 27 16:26:04 crc kubenswrapper[4751]: I0227 16:26:04.519970 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:04 crc kubenswrapper[4751]: I0227 16:26:04.520501 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:04 crc kubenswrapper[4751]: I0227 16:26:04.520651 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:04 crc kubenswrapper[4751]: E0227 16:26:04.520772 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:04 crc kubenswrapper[4751]: E0227 16:26:04.520584 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:04 crc kubenswrapper[4751]: I0227 16:26:04.520768 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:04 crc kubenswrapper[4751]: E0227 16:26:04.520921 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:04 crc kubenswrapper[4751]: E0227 16:26:04.521307 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:06 crc kubenswrapper[4751]: I0227 16:26:06.519762 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:06 crc kubenswrapper[4751]: E0227 16:26:06.520280 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:06 crc kubenswrapper[4751]: I0227 16:26:06.519806 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:06 crc kubenswrapper[4751]: E0227 16:26:06.520447 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:06 crc kubenswrapper[4751]: I0227 16:26:06.519974 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:06 crc kubenswrapper[4751]: I0227 16:26:06.519762 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:06 crc kubenswrapper[4751]: E0227 16:26:06.520697 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:06 crc kubenswrapper[4751]: E0227 16:26:06.520831 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:07 crc kubenswrapper[4751]: I0227 16:26:07.532031 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Feb 27 16:26:08 crc kubenswrapper[4751]: I0227 16:26:08.520061 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:08 crc kubenswrapper[4751]: E0227 16:26:08.520297 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:08 crc kubenswrapper[4751]: I0227 16:26:08.520497 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:08 crc kubenswrapper[4751]: I0227 16:26:08.520643 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:08 crc kubenswrapper[4751]: I0227 16:26:08.520768 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:08 crc kubenswrapper[4751]: E0227 16:26:08.520968 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:08 crc kubenswrapper[4751]: E0227 16:26:08.521147 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:08 crc kubenswrapper[4751]: E0227 16:26:08.521788 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:08 crc kubenswrapper[4751]: I0227 16:26:08.522658 4751 scope.go:117] "RemoveContainer" containerID="674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d" Feb 27 16:26:08 crc kubenswrapper[4751]: E0227 16:26:08.522981 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-vpxjd_openshift-ovn-kubernetes(45a3f89b-11cb-4336-962d-c6835c5f758e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" Feb 27 16:26:08 crc kubenswrapper[4751]: I0227 16:26:08.543917 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:08Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:08 crc kubenswrapper[4751]: I0227 16:26:08.574106 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:08Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:08 crc kubenswrapper[4751]: I0227 16:26:08.598676 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:08Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:08 crc kubenswrapper[4751]: I0227 16:26:08.621351 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:08Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:08 crc kubenswrapper[4751]: E0227 16:26:08.625645 4751 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Feb 27 16:26:08 crc kubenswrapper[4751]: I0227 16:26:08.648680 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\
\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:08Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:08 crc kubenswrapper[4751]: I0227 16:26:08.669113 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:08Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:08 crc kubenswrapper[4751]: I0227 16:26:08.688207 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:08Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:08 crc kubenswrapper[4751]: I0227 16:26:08.710431 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"744a750b-0dea-4975-9f4b-b9fee0972208\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4cbc47ef2524239d9c1679189bfa96296620af4fcfe02507695d700d6455eda4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4017f08d0b8cb9d514bc164c51e34a262f9cf825e254befdeb07348060d12fd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:31Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0227 16:24:00.806855 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0227 16:24:00.808926 1 observer_polling.go:159] Starting file observer\\\\nI0227 16:24:00.862719 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI0227 16:24:00.868567 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0227 16:24:31.264185 1 cmd.go:179] failed checking apiserver connectivity: Unauthorized\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://48bd9202bf8ec4c7579698f2e002923313c105d9109b5053f95ce32ae76c2821\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2444e31d4c252ddbff520f5604104b24d3d356ad1c13579d3c22e3e12136de0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36a6674fde1a5972f2881ddc34b464ea203d05e1e48901c4547ea4aa99085faa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/
static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:08Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:08 crc kubenswrapper[4751]: I0227 16:26:08.730008 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:08Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:08 crc kubenswrapper[4751]: I0227 16:26:08.754388 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c09692dc-ede0-4abf-b370-0746a09a3285\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80337bb8baab11860ebf1376e814c50ee29ed4753d9cf943363813eed97ebdb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef6090b6b35f21dfbe6d596d98ace0147a831021d5e879149b2c60e338e46ee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c53c723a2249cddf16f9817dc34608f5742c9b762d9a66be0e93e0b843d47f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07b1cdbe0a524d5194b02185a86fcff64ddd8
3a916c9e0dc0fa630e123f054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d16b1942494e44a2797d349c20548a3644b2a3b147c57563a8382a62de218b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:08Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:08 crc kubenswrapper[4751]: I0227 16:26:08.769339 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\
\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:08Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:08 crc kubenswrapper[4751]: I0227 16:26:08.783765 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnv
nb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:08Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:08 crc kubenswrapper[4751]: I0227 16:26:08.801467 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3059be4d-025f-48c7-8d37-edc542161c80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4372e903bbffcc7bae3515e3443a8c3b00a773bb51373ec2725cbae4bc309bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc4cc06a7fbfc76c4cc44e86e3a81212db634cf9727ae28b857150dc89f104e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6392f00602ed84ba23d03f7e082004ef85e622f340c36812094984606cfed32c\\\",\\\"im
age\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://275d6054505cb9a29221cf933ffc2b436856047e0a74a8068dfad90cb1006a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://275d6054505cb9a29221cf933ffc2b436856047e0a74a8068dfad90cb1006a57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:08Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:08 crc kubenswrapper[4751]: I0227 16:26:08.827970 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":4,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:08Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:08 crc kubenswrapper[4751]: I0227 16:26:08.846392 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:08Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:08 crc kubenswrapper[4751]: I0227 16:26:08.867384 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae804072caefc03aa58e843a4d0ce899c98b2508b1088adecedd725536509a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:08Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:08 crc kubenswrapper[4751]: I0227 16:26:08.904151 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:25:54Z\\\",\\\"message\\\":\\\"ck:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0227 16:25:54.377911 6908 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0227 16:25:54.377922 6908 obj_retry.go:303] Retry object setup: *v1.Pod openshift-etcd/etcd-crc\\\\nI0227 16:25:54.377930 6908 obj_retry.go:365] Adding new object: *v1.Pod openshift-etcd/etcd-crc\\\\nI0227 16:25:54.377931 6908 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0227 16:25:54.377936 6908 ovn.go:134] Ensuring zone local for Pod openshift-etcd/etcd-crc in node crc\\\\nI0227 16:25:54.377895 6908 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld in node crc\\\\nI0227 16:25:54.377943 6908 obj_retry.go:386] Retry successful for *v1.Pod openshift-etcd/etcd-crc after 0 failed attempt(s)\\\\nI0227 16:25:54.377947 6908 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nF0227 16:25:54.377951 6908 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has sto\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vpxjd_openshift-ovn-kubernetes(45a3f89b-11cb-4336-962d-c6835c5f758e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:08Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:08 crc kubenswrapper[4751]: I0227 16:26:08.926956 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:08Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.809435 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.809505 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.809523 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.809549 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.809567 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:26:09Z","lastTransitionTime":"2026-02-27T16:26:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:26:09 crc kubenswrapper[4751]: E0227 16:26:09.829891 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:09Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.835782 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.835853 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.835876 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.835908 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.835931 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:26:09Z","lastTransitionTime":"2026-02-27T16:26:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:26:09 crc kubenswrapper[4751]: E0227 16:26:09.858645 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:09Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.863794 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.863854 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.863920 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.863948 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.863967 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:26:09Z","lastTransitionTime":"2026-02-27T16:26:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:26:09 crc kubenswrapper[4751]: E0227 16:26:09.886127 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:09Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.891123 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.891176 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.891188 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.891208 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.891222 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:26:09Z","lastTransitionTime":"2026-02-27T16:26:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:26:09 crc kubenswrapper[4751]: E0227 16:26:09.909815 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:09Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.915196 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.915256 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.915275 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.915300 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:26:09 crc kubenswrapper[4751]: I0227 16:26:09.915317 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:26:09Z","lastTransitionTime":"2026-02-27T16:26:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:26:09 crc kubenswrapper[4751]: E0227 16:26:09.935704 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:09Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:09 crc kubenswrapper[4751]: E0227 16:26:09.935920 4751 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 27 16:26:10 crc kubenswrapper[4751]: I0227 16:26:10.520229 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:10 crc kubenswrapper[4751]: E0227 16:26:10.520371 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:10 crc kubenswrapper[4751]: I0227 16:26:10.520606 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:10 crc kubenswrapper[4751]: I0227 16:26:10.520655 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:10 crc kubenswrapper[4751]: E0227 16:26:10.520704 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:10 crc kubenswrapper[4751]: I0227 16:26:10.520723 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:10 crc kubenswrapper[4751]: E0227 16:26:10.520790 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:10 crc kubenswrapper[4751]: E0227 16:26:10.520860 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:12 crc kubenswrapper[4751]: I0227 16:26:12.520034 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:12 crc kubenswrapper[4751]: E0227 16:26:12.520217 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:12 crc kubenswrapper[4751]: I0227 16:26:12.520553 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:12 crc kubenswrapper[4751]: I0227 16:26:12.520617 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:12 crc kubenswrapper[4751]: I0227 16:26:12.520627 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:12 crc kubenswrapper[4751]: E0227 16:26:12.520724 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:12 crc kubenswrapper[4751]: E0227 16:26:12.520808 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:12 crc kubenswrapper[4751]: E0227 16:26:12.520898 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:13 crc kubenswrapper[4751]: E0227 16:26:13.628211 4751 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 27 16:26:14 crc kubenswrapper[4751]: I0227 16:26:14.520363 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:14 crc kubenswrapper[4751]: E0227 16:26:14.520607 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:14 crc kubenswrapper[4751]: I0227 16:26:14.520631 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:14 crc kubenswrapper[4751]: I0227 16:26:14.520673 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:14 crc kubenswrapper[4751]: E0227 16:26:14.520778 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:14 crc kubenswrapper[4751]: E0227 16:26:14.520884 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:14 crc kubenswrapper[4751]: I0227 16:26:14.520944 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:14 crc kubenswrapper[4751]: E0227 16:26:14.520989 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.243338 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4jc4n_dc07559e-a5c7-458c-b3ec-646981b798c1/kube-multus/0.log" Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.243392 4751 generic.go:334] "Generic (PLEG): container finished" podID="dc07559e-a5c7-458c-b3ec-646981b798c1" containerID="e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96" exitCode=1 Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.243462 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4jc4n" event={"ID":"dc07559e-a5c7-458c-b3ec-646981b798c1","Type":"ContainerDied","Data":"e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96"} Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.244021 4751 scope.go:117] "RemoveContainer" containerID="e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96" Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.260217 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:16Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.278779 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:16Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.294739 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:26:16Z\\\",\\\"message\\\":\\\"2026-02-27T16:25:30+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_fe1fcaa1-b815-44b0-8dcb-29abab8513ca\\\\n2026-02-27T16:25:30+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_fe1fcaa1-b815-44b0-8dcb-29abab8513ca to /host/opt/cni/bin/\\\\n2026-02-27T16:25:30Z [verbose] multus-daemon 
started\\\\n2026-02-27T16:25:30Z [verbose] Readiness Indicator file check\\\\n2026-02-27T16:26:15Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:16Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.310986 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:16Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.329229 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:16Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.347240 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:16Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.368590 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:16Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.388215 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:16Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.409220 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"744a750b-0dea-4975-9f4b-b9fee0972208\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4cbc47ef2524239d9c1679189bfa96296620af4fcfe02507695d700d6455eda4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4017f08d0b8cb9d514bc164c51e34a262f9cf825e254befdeb07348060d12fd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:31Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0227 16:24:00.806855 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0227 16:24:00.808926 1 observer_polling.go:159] Starting file observer\\\\nI0227 16:24:00.862719 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI0227 16:24:00.868567 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0227 16:24:31.264185 1 cmd.go:179] failed checking apiserver connectivity: Unauthorized\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://48bd9202bf8ec4c7579698f2e002923313c105d9109b5053f95ce32ae76c2821\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2444e31d4c252ddbff520f5604104b24d3d356ad1c13579d3c22e3e12136de0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36a6674fde1a5972f2881ddc34b464ea203d05e1e48901c4547ea4aa99085faa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/
static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:16Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.428379 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\
\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:16Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.462168 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c09692dc-ede0-4abf-b370-0746a09a3285\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80337bb8baab11860ebf1376e814c50ee29ed4753d9cf943363813eed97ebdb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef6090b6b35f21dfbe6d596d98ace0147a831021d5e879149b2c60e338e46ee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\
":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c53c723a2249cddf16f9817dc34608f5742c9b762d9a66be0e93e0b843d47f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07b1cdbe0a524d5194b02185a86fcff64ddd83a916c9e0dc0fa630e123f054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d16b1942494e44a2797d349c20548a3644b2a3b147c57563a8382a62de218b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"exitCode\\\":0,\\\"finishedAt
\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:16Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.476628 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:16Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.498723 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:16Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.520268 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.520332 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.520458 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:16 crc kubenswrapper[4751]: E0227 16:26:16.520455 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:16 crc kubenswrapper[4751]: E0227 16:26:16.520655 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.520724 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:16 crc kubenswrapper[4751]: E0227 16:26:16.520806 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:16 crc kubenswrapper[4751]: E0227 16:26:16.520919 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.529549 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae804072caefc03aa58e843a4d0ce899c98b2508b1088adecedd725536509a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":
[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f5
67acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\
\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:16Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.560143 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuberne
tes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:25:54Z\\\",\\\"message\\\":\\\"ck:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0227 16:25:54.377911 6908 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0227 16:25:54.377922 6908 obj_retry.go:303] Retry object setup: *v1.Pod openshift-etcd/etcd-crc\\\\nI0227 16:25:54.377930 6908 obj_retry.go:365] Adding new object: *v1.Pod openshift-etcd/etcd-crc\\\\nI0227 16:25:54.377931 6908 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0227 16:25:54.377936 6908 ovn.go:134] Ensuring zone local for Pod openshift-etcd/etcd-crc in node crc\\\\nI0227 16:25:54.377895 6908 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld in node crc\\\\nI0227 16:25:54.377943 6908 obj_retry.go:386] Retry successful for *v1.Pod openshift-etcd/etcd-crc after 0 failed attempt(s)\\\\nI0227 16:25:54.377947 6908 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nF0227 16:25:54.377951 6908 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 
0x1fcc3c0 0x1fcc360} was not added to shared informer because it has sto\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-vpxjd_openshift-ovn-kubernetes(45a3f89b-11cb-4336-962d-c6835c5f758e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\
\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:16Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.577761 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:16Z is after 2025-08-24T17:21:41Z" Feb 27 
16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.597345 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3059be4d-025f-48c7-8d37-edc542161c80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4372e903bbffcc7bae3515e3443a8c3b00a773bb51373ec2725cbae4bc309bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc4cc06a7fbfc76c4cc44e86e3a81212db634cf9727ae28b857150dc89f104e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6392f00602ed84ba23d03f7e082004ef85e622f340c36812094984606cfed32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://275d6054505cb9a29221cf933ffc2b436856047e0a74a8068dfad90cb1006a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://275d6054505cb9a29221cf933ffc2b436856047e0a74a8068dfad90cb1006a57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:16Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:16 crc kubenswrapper[4751]: I0227 16:26:16.615101 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/cr
cont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery 
information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":4,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:16Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:17 crc kubenswrapper[4751]: I0227 16:26:17.251033 4751 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-multus_multus-4jc4n_dc07559e-a5c7-458c-b3ec-646981b798c1/kube-multus/0.log" Feb 27 16:26:17 crc kubenswrapper[4751]: I0227 16:26:17.251129 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4jc4n" event={"ID":"dc07559e-a5c7-458c-b3ec-646981b798c1","Type":"ContainerStarted","Data":"1b78fbbbec971c56f84f987d09108ddcb21d1189a379396e1174678f4de8d0e6"} Feb 27 16:26:17 crc kubenswrapper[4751]: I0227 16:26:17.275332 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae804072caefc03aa58e843a4d0ce899c98b2508b1088adecedd725536509a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"n
ame\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":
{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:17Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:17 crc kubenswrapper[4751]: I0227 16:26:17.306846 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58417
651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:25:54Z\\\",\\\"message\\\":\\\"ck:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0227 16:25:54.377911 6908 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0227 16:25:54.377922 6908 obj_retry.go:303] Retry object setup: *v1.Pod openshift-etcd/etcd-crc\\\\nI0227 16:25:54.377930 6908 obj_retry.go:365] Adding new object: *v1.Pod openshift-etcd/etcd-crc\\\\nI0227 16:25:54.377931 6908 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0227 16:25:54.377936 6908 ovn.go:134] Ensuring zone local for Pod openshift-etcd/etcd-crc in node crc\\\\nI0227 16:25:54.377895 6908 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld in node crc\\\\nI0227 16:25:54.377943 6908 obj_retry.go:386] Retry successful for *v1.Pod openshift-etcd/etcd-crc after 0 failed attempt(s)\\\\nI0227 16:25:54.377947 6908 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nF0227 16:25:54.377951 6908 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has 
sto\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-vpxjd_openshift-ovn-kubernetes(45a3f89b-11cb-4336-962d-c6835c5f758e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursive
ReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:17Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:17 crc kubenswrapper[4751]: I0227 16:26:17.325166 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:17Z is after 2025-08-24T17:21:41Z" Feb 27 
16:26:17 crc kubenswrapper[4751]: I0227 16:26:17.343968 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3059be4d-025f-48c7-8d37-edc542161c80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4372e903bbffcc7bae3515e3443a8c3b00a773bb51373ec2725cbae4bc309bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc4cc06a7fbfc76c4cc44e86e3a81212db634cf9727ae28b857150dc89f104e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6392f00602ed84ba23d03f7e082004ef85e622f340c36812094984606cfed32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://275d6054505cb9a29221cf933ffc2b436856047e0a74a8068dfad90cb1006a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://275d6054505cb9a29221cf933ffc2b436856047e0a74a8068dfad90cb1006a57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:17Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:17 crc kubenswrapper[4751]: I0227 16:26:17.366866 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/cr
cont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery 
information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":4,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:17Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:17 crc kubenswrapper[4751]: I0227 16:26:17.386156 4751 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:17Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:17 crc kubenswrapper[4751]: I0227 16:26:17.403125 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:17Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:17 crc kubenswrapper[4751]: I0227 16:26:17.425358 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b78fbbbec971c56f84f987d09108ddcb21d1189a379396e1174678f4de8d0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:26:16Z\\\",\\\"message\\\":\\\"2026-02-27T16:25:30+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_fe1fcaa1-b815-44b0-8dcb-29abab8513ca\\\\n2026-02-27T16:25:30+00:00 [cnibincopy] 
Successfully moved files in /host/opt/cni/bin/upgrade_fe1fcaa1-b815-44b0-8dcb-29abab8513ca to /host/opt/cni/bin/\\\\n2026-02-27T16:25:30Z [verbose] multus-daemon started\\\\n2026-02-27T16:25:30Z [verbose] Readiness Indicator file check\\\\n2026-02-27T16:26:15Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:17Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:17 crc kubenswrapper[4751]: I0227 16:26:17.438593 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:17Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:17 crc kubenswrapper[4751]: I0227 16:26:17.454312 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:17Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:17 crc kubenswrapper[4751]: I0227 16:26:17.474668 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:17Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:17 crc kubenswrapper[4751]: I0227 16:26:17.491743 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:17Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:17 crc kubenswrapper[4751]: I0227 16:26:17.511543 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:17Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:17 crc kubenswrapper[4751]: I0227 16:26:17.530827 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"744a750b-0dea-4975-9f4b-b9fee0972208\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4cbc47ef2524239d9c1679189bfa96296620af4fcfe02507695d700d6455eda4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4017f08d0b8cb9d514bc164c51e34a262f9cf825e254befdeb07348060d12fd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:31Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0227 16:24:00.806855 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0227 16:24:00.808926 1 observer_polling.go:159] Starting file observer\\\\nI0227 16:24:00.862719 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI0227 16:24:00.868567 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0227 16:24:31.264185 1 cmd.go:179] failed checking apiserver connectivity: Unauthorized\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://48bd9202bf8ec4c7579698f2e002923313c105d9109b5053f95ce32ae76c2821\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2444e31d4c252ddbff520f5604104b24d3d356ad1c13579d3c22e3e12136de0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36a6674fde1a5972f2881ddc34b464ea203d05e1e48901c4547ea4aa99085faa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/
static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:17Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:17 crc kubenswrapper[4751]: I0227 16:26:17.549821 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:17Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:17 crc kubenswrapper[4751]: I0227 16:26:17.581591 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c09692dc-ede0-4abf-b370-0746a09a3285\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80337bb8baab11860ebf1376e814c50ee29ed4753d9cf943363813eed97ebdb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef6090b6b35f21dfbe6d596d98ace0147a831021d5e879149b2c60e338e46ee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c53c723a2249cddf16f9817dc34608f5742c9b762d9a66be0e93e0b843d47f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07b1cdbe0a524d5194b02185a86fcff64ddd8
3a916c9e0dc0fa630e123f054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d16b1942494e44a2797d349c20548a3644b2a3b147c57563a8382a62de218b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:17Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:17 crc kubenswrapper[4751]: I0227 16:26:17.596142 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\
\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:17Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:17 crc kubenswrapper[4751]: I0227 16:26:17.612350 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnv
nb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:17Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:18 crc kubenswrapper[4751]: I0227 16:26:18.520387 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:18 crc kubenswrapper[4751]: I0227 16:26:18.520583 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:18 crc kubenswrapper[4751]: I0227 16:26:18.520465 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:18 crc kubenswrapper[4751]: I0227 16:26:18.520449 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:18 crc kubenswrapper[4751]: E0227 16:26:18.520958 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:18 crc kubenswrapper[4751]: E0227 16:26:18.521098 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:18 crc kubenswrapper[4751]: E0227 16:26:18.521376 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:18 crc kubenswrapper[4751]: E0227 16:26:18.521555 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:18 crc kubenswrapper[4751]: I0227 16:26:18.540103 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:18Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:18 crc kubenswrapper[4751]: I0227 16:26:18.564075 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:18Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:18 crc kubenswrapper[4751]: I0227 16:26:18.586995 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:18Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:18 crc kubenswrapper[4751]: I0227 16:26:18.607941 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:18Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:18 crc kubenswrapper[4751]: E0227 16:26:18.630580 4751 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Feb 27 16:26:18 crc kubenswrapper[4751]: I0227 16:26:18.631216 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:18Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:18 crc kubenswrapper[4751]: I0227 16:26:18.650933 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b78fbbbec971c56f84f987d09108ddcb21d1189a379396e1174678f4de8d0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:26:16Z\\\",\\\"message\\\":\\\"2026-02-27T16:25:30+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_fe1fcaa1-b815-44b0-8dcb-29abab8513ca\\\\n2026-02-27T16:25:30+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_fe1fcaa1-b815-44b0-8dcb-29abab8513ca to /host/opt/cni/bin/\\\\n2026-02-27T16:25:30Z [verbose] multus-daemon started\\\\n2026-02-27T16:25:30Z [verbose] Readiness Indicator file check\\\\n2026-02-27T16:26:15Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:18Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:18 crc kubenswrapper[4751]: I0227 16:26:18.671532 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with 
unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:18Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:18 crc kubenswrapper[4751]: I0227 16:26:18.691859 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"744a750b-0dea-4975-9f4b-b9fee0972208\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4cbc47ef2524239d9c1679189bfa96296620af4fcfe02507695d700d6455eda4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4017f08d0b8cb9d514bc164c51e34a262f9cf825e254befdeb07348060d12fd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:31Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0227 16:24:00.806855 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0227 16:24:00.808926 1 observer_polling.go:159] Starting file observer\\\\nI0227 16:24:00.862719 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI0227 16:24:00.868567 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0227 16:24:31.264185 1 cmd.go:179] failed checking apiserver connectivity: Unauthorized\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://48bd9202bf8ec4c7579698f2e002923313c105d9109b5053f95ce32ae76c2821\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2444e31d4c252ddbff520f5604104b24d3d356ad1c13579d3c22e3e12136de0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36a6674fde1a5972f2881ddc34b464ea203d05e1e48901c4547ea4aa99085faa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/
static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:18Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:18 crc kubenswrapper[4751]: I0227 16:26:18.710079 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:18Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:18 crc kubenswrapper[4751]: I0227 16:26:18.745217 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c09692dc-ede0-4abf-b370-0746a09a3285\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80337bb8baab11860ebf1376e814c50ee29ed4753d9cf943363813eed97ebdb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef6090b6b35f21dfbe6d596d98ace0147a831021d5e879149b2c60e338e46ee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c53c723a2249cddf16f9817dc34608f5742c9b762d9a66be0e93e0b843d47f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07b1cdbe0a524d5194b02185a86fcff64ddd8
3a916c9e0dc0fa630e123f054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d16b1942494e44a2797d349c20548a3644b2a3b147c57563a8382a62de218b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:18Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:18 crc kubenswrapper[4751]: I0227 16:26:18.759425 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\
\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:18Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:18 crc kubenswrapper[4751]: I0227 16:26:18.776477 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnv
nb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:18Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:18 crc kubenswrapper[4751]: I0227 16:26:18.796037 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3059be4d-025f-48c7-8d37-edc542161c80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4372e903bbffcc7bae3515e3443a8c3b00a773bb51373ec2725cbae4bc309bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc4cc06a7fbfc76c4cc44e86e3a81212db634cf9727ae28b857150dc89f104e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6392f00602ed84ba23d03f7e082004ef85e622f340c36812094984606cfed32c\\\",\\\"im
age\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://275d6054505cb9a29221cf933ffc2b436856047e0a74a8068dfad90cb1006a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://275d6054505cb9a29221cf933ffc2b436856047e0a74a8068dfad90cb1006a57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:18Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:18 crc kubenswrapper[4751]: I0227 16:26:18.820245 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":4,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:18Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:18 crc kubenswrapper[4751]: I0227 16:26:18.856281 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:18Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:18 crc kubenswrapper[4751]: I0227 16:26:18.876944 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae804072caefc03aa58e843a4d0ce899c98b2508b1088adecedd725536509a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:18Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:18 crc kubenswrapper[4751]: I0227 16:26:18.903943 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:25:54Z\\\",\\\"message\\\":\\\"ck:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0227 16:25:54.377911 6908 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0227 16:25:54.377922 6908 obj_retry.go:303] Retry object setup: *v1.Pod openshift-etcd/etcd-crc\\\\nI0227 16:25:54.377930 6908 obj_retry.go:365] Adding new object: *v1.Pod openshift-etcd/etcd-crc\\\\nI0227 16:25:54.377931 6908 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0227 16:25:54.377936 6908 ovn.go:134] Ensuring zone local for Pod openshift-etcd/etcd-crc in node crc\\\\nI0227 16:25:54.377895 6908 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld in node crc\\\\nI0227 16:25:54.377943 6908 obj_retry.go:386] Retry successful for *v1.Pod openshift-etcd/etcd-crc after 0 failed attempt(s)\\\\nI0227 16:25:54.377947 6908 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nF0227 16:25:54.377951 6908 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has sto\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vpxjd_openshift-ovn-kubernetes(45a3f89b-11cb-4336-962d-c6835c5f758e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:18Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:18 crc kubenswrapper[4751]: I0227 16:26:18.928374 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:18Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.247356 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.247426 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.247436 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.247449 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.247459 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:26:20Z","lastTransitionTime":"2026-02-27T16:26:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:26:20 crc kubenswrapper[4751]: E0227 16:26:20.259890 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:20Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.265778 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.265839 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.265853 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.265877 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.265893 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:26:20Z","lastTransitionTime":"2026-02-27T16:26:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:26:20 crc kubenswrapper[4751]: E0227 16:26:20.280212 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:20Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.285594 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.285652 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.285668 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.285693 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.285710 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:26:20Z","lastTransitionTime":"2026-02-27T16:26:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:26:20 crc kubenswrapper[4751]: E0227 16:26:20.300627 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:20Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.304537 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.304579 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.304590 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.304609 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.304621 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:26:20Z","lastTransitionTime":"2026-02-27T16:26:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:26:20 crc kubenswrapper[4751]: E0227 16:26:20.317823 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:20Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.323246 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.323289 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.323304 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.323321 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.323334 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:26:20Z","lastTransitionTime":"2026-02-27T16:26:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:26:20 crc kubenswrapper[4751]: E0227 16:26:20.341814 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:20Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:20 crc kubenswrapper[4751]: E0227 16:26:20.341936 4751 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.519941 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.520175 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.520339 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:20 crc kubenswrapper[4751]: E0227 16:26:20.520333 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:20 crc kubenswrapper[4751]: I0227 16:26:20.520359 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:20 crc kubenswrapper[4751]: E0227 16:26:20.520700 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:20 crc kubenswrapper[4751]: E0227 16:26:20.520963 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:20 crc kubenswrapper[4751]: E0227 16:26:20.521050 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:22 crc kubenswrapper[4751]: I0227 16:26:22.520601 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:22 crc kubenswrapper[4751]: I0227 16:26:22.520703 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:22 crc kubenswrapper[4751]: I0227 16:26:22.520743 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:22 crc kubenswrapper[4751]: E0227 16:26:22.520894 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:22 crc kubenswrapper[4751]: I0227 16:26:22.520954 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:22 crc kubenswrapper[4751]: E0227 16:26:22.521077 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:22 crc kubenswrapper[4751]: E0227 16:26:22.521128 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:22 crc kubenswrapper[4751]: E0227 16:26:22.521199 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:23 crc kubenswrapper[4751]: I0227 16:26:23.522020 4751 scope.go:117] "RemoveContainer" containerID="674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d" Feb 27 16:26:23 crc kubenswrapper[4751]: E0227 16:26:23.632674 4751 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.282613 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vpxjd_45a3f89b-11cb-4336-962d-c6835c5f758e/ovnkube-controller/2.log" Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.286230 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerStarted","Data":"d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e"} Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.286920 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.309996 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"744a750b-0dea-4975-9f4b-b9fee0972208\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4cbc47ef2524239d9c1679189bfa96296620af4fcfe02507695d700d6455eda4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4017f08d0b8cb9d514bc164c51e34a262f9cf825e254befdeb07348060d12fd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:31Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0227 16:24:00.806855 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0227 16:24:00.808926 1 observer_polling.go:159] Starting file observer\\\\nI0227 16:24:00.862719 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI0227 16:24:00.868567 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0227 16:24:31.264185 1 cmd.go:179] failed checking apiserver connectivity: Unauthorized\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://48bd9202bf8ec4c7579698f2e002923313c105d9109b5053f95ce32ae76c2821\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2444e31d4c252ddbff520f5604104b24d3d356ad1c13579d3c22e3e12136de0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36a6674fde1a5972f2881ddc34b464ea203d05e1e48901c4547ea4aa99085faa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/
static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:24Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.331698 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:24Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.356329 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c09692dc-ede0-4abf-b370-0746a09a3285\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80337bb8baab11860ebf1376e814c50ee29ed4753d9cf943363813eed97ebdb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef6090b6b35f21dfbe6d596d98ace0147a831021d5e879149b2c60e338e46ee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c53c723a2249cddf16f9817dc34608f5742c9b762d9a66be0e93e0b843d47f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07b1cdbe0a524d5194b02185a86fcff64ddd8
3a916c9e0dc0fa630e123f054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d16b1942494e44a2797d349c20548a3644b2a3b147c57563a8382a62de218b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:24Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.374809 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\
\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:24Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.391039 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnv
nb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:24Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.406928 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3059be4d-025f-48c7-8d37-edc542161c80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4372e903bbffcc7bae3515e3443a8c3b00a773bb51373ec2725cbae4bc309bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc4cc06a7fbfc76c4cc44e86e3a81212db634cf9727ae28b857150dc89f104e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6392f00602ed84ba23d03f7e082004ef85e622f340c36812094984606cfed32c\\\",\\\"im
age\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://275d6054505cb9a29221cf933ffc2b436856047e0a74a8068dfad90cb1006a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://275d6054505cb9a29221cf933ffc2b436856047e0a74a8068dfad90cb1006a57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:24Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.427386 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":4,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:24Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.444705 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:24Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.465391 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae804072caefc03aa58e843a4d0ce899c98b2508b1088adecedd725536509a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:24Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.486426 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:25:54Z\\\",\\\"message\\\":\\\"ck:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0227 16:25:54.377911 6908 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0227 16:25:54.377922 6908 obj_retry.go:303] Retry object setup: *v1.Pod openshift-etcd/etcd-crc\\\\nI0227 16:25:54.377930 6908 obj_retry.go:365] Adding new object: *v1.Pod openshift-etcd/etcd-crc\\\\nI0227 16:25:54.377931 6908 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0227 16:25:54.377936 6908 ovn.go:134] Ensuring zone local for Pod openshift-etcd/etcd-crc in node crc\\\\nI0227 16:25:54.377895 6908 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld in node crc\\\\nI0227 16:25:54.377943 6908 obj_retry.go:386] Retry successful for *v1.Pod openshift-etcd/etcd-crc after 0 failed attempt(s)\\\\nI0227 16:25:54.377947 6908 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nF0227 16:25:54.377951 6908 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has 
sto\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"co
ntainerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:24Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.501201 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:24Z is after 2025-08-24T17:21:41Z" Feb 27 
16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.518105 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:24Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.520362 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.520465 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.520465 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:24 crc kubenswrapper[4751]: E0227 16:26:24.520560 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:24 crc kubenswrapper[4751]: E0227 16:26:24.520641 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.520651 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:24 crc kubenswrapper[4751]: E0227 16:26:24.520730 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:24 crc kubenswrapper[4751]: E0227 16:26:24.520907 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.534465 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:24Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.547250 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:24Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.560212 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:24Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.574677 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b78fbbbec971c56f84f987d09108ddcb21d1189a379396e1174678f4de8d0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:26:16Z\\\",\\\"message\\\":\\\"2026-02-27T16:25:30+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_fe1fcaa1-b815-44b0-8dcb-29abab8513ca\\\\n2026-02-27T16:25:30+00:00 [cnibincopy] 
Successfully moved files in /host/opt/cni/bin/upgrade_fe1fcaa1-b815-44b0-8dcb-29abab8513ca to /host/opt/cni/bin/\\\\n2026-02-27T16:25:30Z [verbose] multus-daemon started\\\\n2026-02-27T16:25:30Z [verbose] Readiness Indicator file check\\\\n2026-02-27T16:26:15Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:24Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.585845 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:24Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:24 crc kubenswrapper[4751]: I0227 16:26:24.596691 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:24Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.293070 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vpxjd_45a3f89b-11cb-4336-962d-c6835c5f758e/ovnkube-controller/3.log" Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.294292 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vpxjd_45a3f89b-11cb-4336-962d-c6835c5f758e/ovnkube-controller/2.log" Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.298916 4751 generic.go:334] "Generic (PLEG): container finished" podID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerID="d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e" exitCode=1 Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.298989 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" 
event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerDied","Data":"d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e"} Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.299051 4751 scope.go:117] "RemoveContainer" containerID="674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d" Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.301529 4751 scope.go:117] "RemoveContainer" containerID="d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e" Feb 27 16:26:25 crc kubenswrapper[4751]: E0227 16:26:25.301890 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-vpxjd_openshift-ovn-kubernetes(45a3f89b-11cb-4336-962d-c6835c5f758e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.314966 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:25Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.335095 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:25Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.355344 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:25Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.373504 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:25Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.388088 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:25Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.404645 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b78fbbbec971c56f84f987d09108ddcb21d1189a379396e1174678f4de8d0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:26:16Z\\\",\\\"message\\\":\\\"2026-02-27T16:25:30+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_fe1fcaa1-b815-44b0-8dcb-29abab8513ca\\\\n2026-02-27T16:25:30+00:00 [cnibincopy] 
Successfully moved files in /host/opt/cni/bin/upgrade_fe1fcaa1-b815-44b0-8dcb-29abab8513ca to /host/opt/cni/bin/\\\\n2026-02-27T16:25:30Z [verbose] multus-daemon started\\\\n2026-02-27T16:25:30Z [verbose] Readiness Indicator file check\\\\n2026-02-27T16:26:15Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:25Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.417580 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:25Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.429247 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"744a750b-0dea-4975-9f4b-b9fee0972208\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4cbc47ef2524239d9c1679189bfa96296620af4fcfe02507695d700d6455eda4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4017f08d0b8cb9d514bc164c51e34a262f9cf825e254befdeb07348060d12fd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:31Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0227 16:24:00.806855 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0227 16:24:00.808926 1 observer_polling.go:159] Starting file observer\\\\nI0227 16:24:00.862719 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI0227 16:24:00.868567 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0227 16:24:31.264185 1 cmd.go:179] failed checking apiserver connectivity: Unauthorized\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://48bd9202bf8ec4c7579698f2e002923313c105d9109b5053f95ce32ae76c2821\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2444e31d4c252ddbff520f5604104b24d3d356ad1c13579d3c22e3e12136de0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36a6674fde1a5972f2881ddc34b464ea203d05e1e48901c4547ea4aa99085faa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/
static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:25Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.441084 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:25Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.464036 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c09692dc-ede0-4abf-b370-0746a09a3285\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80337bb8baab11860ebf1376e814c50ee29ed4753d9cf943363813eed97ebdb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef6090b6b35f21dfbe6d596d98ace0147a831021d5e879149b2c60e338e46ee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c53c723a2249cddf16f9817dc34608f5742c9b762d9a66be0e93e0b843d47f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07b1cdbe0a524d5194b02185a86fcff64ddd8
3a916c9e0dc0fa630e123f054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d16b1942494e44a2797d349c20548a3644b2a3b147c57563a8382a62de218b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:25Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.473142 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\
\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:25Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.483770 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnv
nb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:25Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.494572 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3059be4d-025f-48c7-8d37-edc542161c80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4372e903bbffcc7bae3515e3443a8c3b00a773bb51373ec2725cbae4bc309bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc4cc06a7fbfc76c4cc44e86e3a81212db634cf9727ae28b857150dc89f104e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6392f00602ed84ba23d03f7e082004ef85e622f340c36812094984606cfed32c\\\",\\\"im
age\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://275d6054505cb9a29221cf933ffc2b436856047e0a74a8068dfad90cb1006a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://275d6054505cb9a29221cf933ffc2b436856047e0a74a8068dfad90cb1006a57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:25Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.506832 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":4,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:25Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.517594 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:25Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.534126 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae804072caefc03aa58e843a4d0ce899c98b2508b1088adecedd725536509a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:25Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.556625 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://674f19e5adbd0664e39863cd818390c01ee4518e6452f8c668fe0de5d2c9e43d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:25:54Z\\\",\\\"message\\\":\\\"ck:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0227 16:25:54.377911 6908 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0227 16:25:54.377922 6908 obj_retry.go:303] Retry object setup: *v1.Pod openshift-etcd/etcd-crc\\\\nI0227 16:25:54.377930 6908 obj_retry.go:365] Adding new object: *v1.Pod openshift-etcd/etcd-crc\\\\nI0227 16:25:54.377931 6908 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0227 16:25:54.377936 6908 ovn.go:134] Ensuring zone local for Pod openshift-etcd/etcd-crc in node crc\\\\nI0227 16:25:54.377895 6908 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld in node crc\\\\nI0227 16:25:54.377943 6908 obj_retry.go:386] Retry successful for *v1.Pod openshift-etcd/etcd-crc after 0 failed attempt(s)\\\\nI0227 16:25:54.377947 6908 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nF0227 16:25:54.377951 6908 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has sto\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:26:24Z\\\",\\\"message\\\":\\\"chine-approver for network=default\\\\nI0227 16:26:24.478923 7232 services_controller.go:356] Processing sync for service openshift-image-registry/image-registry-operator 
for network=default\\\\nF0227 16:26:24.478917 7232 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:24Z is after 2025-08-24T17:21:41Z]\\\\nI0227 16:26:24.478937 7232 services_controller.go:360] Finished syncing service image-registry-operator on namespace openshift-image-registry for network=default : 15.59µs\\\\nI0227 16:26:24.478955 7232 services_controller.go:356] Processing sync for service openshift-kube-controller-manager/kube-controller-manager for ne\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\
\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:25Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:25 crc kubenswrapper[4751]: I0227 16:26:25.567175 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:25Z is after 2025-08-24T17:21:41Z" Feb 27 
16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.305966 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vpxjd_45a3f89b-11cb-4336-962d-c6835c5f758e/ovnkube-controller/3.log" Feb 27 16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.310790 4751 scope.go:117] "RemoveContainer" containerID="d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e" Feb 27 16:26:26 crc kubenswrapper[4751]: E0227 16:26:26.310946 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-vpxjd_openshift-ovn-kubernetes(45a3f89b-11cb-4336-962d-c6835c5f758e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" Feb 27 16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.326947 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:26Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.342246 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:26Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.356238 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:26Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.374304 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:26Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.389907 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b78fbbbec971c56f84f987d09108ddcb21d1189a379396e1174678f4de8d0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:26:16Z\\\",\\\"message\\\":\\\"2026-02-27T16:25:30+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_fe1fcaa1-b815-44b0-8dcb-29abab8513ca\\\\n2026-02-27T16:25:30+00:00 [cnibincopy] 
Successfully moved files in /host/opt/cni/bin/upgrade_fe1fcaa1-b815-44b0-8dcb-29abab8513ca to /host/opt/cni/bin/\\\\n2026-02-27T16:25:30Z [verbose] multus-daemon started\\\\n2026-02-27T16:25:30Z [verbose] Readiness Indicator file check\\\\n2026-02-27T16:26:15Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:26Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.405802 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:26Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.423024 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:26Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.440996 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"744a750b-0dea-4975-9f4b-b9fee0972208\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4cbc47ef2524239d9c1679189bfa96296620af4fcfe02507695d700d6455eda4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4017f08d0b8cb9d514bc164c51e34a262f9cf825e254befdeb07348060d12fd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:31Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0227 16:24:00.806855 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0227 16:24:00.808926 1 observer_polling.go:159] Starting file observer\\\\nI0227 16:24:00.862719 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI0227 16:24:00.868567 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0227 16:24:31.264185 1 cmd.go:179] failed checking apiserver connectivity: Unauthorized\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://48bd9202bf8ec4c7579698f2e002923313c105d9109b5053f95ce32ae76c2821\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2444e31d4c252ddbff520f5604104b24d3d356ad1c13579d3c22e3e12136de0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36a6674fde1a5972f2881ddc34b464ea203d05e1e48901c4547ea4aa99085faa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/
static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:26Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.456266 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:26Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.482121 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c09692dc-ede0-4abf-b370-0746a09a3285\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80337bb8baab11860ebf1376e814c50ee29ed4753d9cf943363813eed97ebdb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef6090b6b35f21dfbe6d596d98ace0147a831021d5e879149b2c60e338e46ee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c53c723a2249cddf16f9817dc34608f5742c9b762d9a66be0e93e0b843d47f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07b1cdbe0a524d5194b02185a86fcff64ddd8
3a916c9e0dc0fa630e123f054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d16b1942494e44a2797d349c20548a3644b2a3b147c57563a8382a62de218b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:26Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.496539 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\
\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:26Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.513332 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnv
nb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:26Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.519959 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.520032 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.520063 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.520063 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:26 crc kubenswrapper[4751]: E0227 16:26:26.520117 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:26 crc kubenswrapper[4751]: E0227 16:26:26.520214 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:26 crc kubenswrapper[4751]: E0227 16:26:26.520282 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:26 crc kubenswrapper[4751]: E0227 16:26:26.520335 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.531485 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3059be4d-025f-48c7-8d37-edc542161c80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4372e903bbffcc7bae3515e3443a8c3b00a773bb51373ec2725cbae4bc309bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc4cc06a7fbfc76c4cc44e86e3a81212db634cf9727ae28b857150dc89f104e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6392f00602ed84ba23d03f7e082004ef85e622f340c36812094984606cfed32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir
\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://275d6054505cb9a29221cf933ffc2b436856047e0a74a8068dfad90cb1006a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://275d6054505cb9a29221cf933ffc2b436856047e0a74a8068dfad90cb1006a57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:26Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.554236 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":4,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:26Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.572888 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:26Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.589240 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae804072caefc03aa58e843a4d0ce899c98b2508b1088adecedd725536509a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:26Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.608390 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:26:24Z\\\",\\\"message\\\":\\\"chine-approver for network=default\\\\nI0227 16:26:24.478923 7232 services_controller.go:356] Processing sync for service openshift-image-registry/image-registry-operator for network=default\\\\nF0227 16:26:24.478917 7232 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:24Z is after 2025-08-24T17:21:41Z]\\\\nI0227 16:26:24.478937 7232 services_controller.go:360] Finished syncing service image-registry-operator on namespace openshift-image-registry for network=default : 15.59µs\\\\nI0227 16:26:24.478955 7232 services_controller.go:356] Processing sync for service openshift-kube-controller-manager/kube-controller-manager for ne\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:26:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vpxjd_openshift-ovn-kubernetes(45a3f89b-11cb-4336-962d-c6835c5f758e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:26Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:26 crc kubenswrapper[4751]: I0227 16:26:26.622713 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:26Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:28 crc kubenswrapper[4751]: I0227 16:26:28.520595 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:28 crc kubenswrapper[4751]: I0227 16:26:28.520629 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:28 crc kubenswrapper[4751]: I0227 16:26:28.520698 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:28 crc kubenswrapper[4751]: E0227 16:26:28.520813 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:28 crc kubenswrapper[4751]: I0227 16:26:28.521145 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:28 crc kubenswrapper[4751]: E0227 16:26:28.521255 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:28 crc kubenswrapper[4751]: E0227 16:26:28.521525 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:28 crc kubenswrapper[4751]: E0227 16:26:28.521770 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:28 crc kubenswrapper[4751]: I0227 16:26:28.542870 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f15bd036cdf81b96a943216ca032d5affa0af0fd646bb28ae3b7a636b574c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:28Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:28 crc kubenswrapper[4751]: I0227 16:26:28.566055 4751 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"744a750b-0dea-4975-9f4b-b9fee0972208\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4cbc47ef2524239d9c1679189bfa96296620af4fcfe02507695d700d6455eda4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4017f08d0b8cb9d514bc164c51e34a262f9cf825e254befdeb07348060d12fd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:31Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0227 16:24:00.806855 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0227 16:24:00.808926 1 observer_polling.go:159] Starting file observer\\\\nI0227 16:24:00.862719 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI0227 16:24:00.868567 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0227 16:24:31.264185 1 cmd.go:179] failed checking apiserver connectivity: Unauthorized\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://48bd9202bf8ec4c7579698f2e002923313c105d9109b5053f95ce32ae76c2821\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2444e31d4c252ddbff520f5604104b24d3d356ad1c13579d3c22e3e12136de0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://36a6674fde1a5972f2881ddc34b464ea203d05e1e48901c4547ea4aa99085faa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/
static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:28Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:28 crc kubenswrapper[4751]: I0227 16:26:28.585689 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9c6p9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac07f9c0-4eff-4c84-8020-ae183619eae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee65c30e0707c51ba4810f1d8c75c754018031b0930110f3eb042f282d9d298\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4qdqf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9c6p9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:28Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:28 crc kubenswrapper[4751]: I0227 16:26:28.605607 4751 
status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://614d0112e930bca3d5140bae31b431f0920a5231ebfe733de9cbcda80638de4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnvnb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rkcdq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-02-27T16:26:28Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:28 crc kubenswrapper[4751]: E0227 16:26:28.633871 4751 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 27 16:26:28 crc kubenswrapper[4751]: I0227 16:26:28.640388 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c09692dc-ede0-4abf-b370-0746a09a3285\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80337bb8baab11860ebf1376e814c50ee29ed4753d9cf943363813eed97ebdb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef6090b6b35f21dfbe6d596d98ace0147a831021d5e879149b2c60e338e46ee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c53c723a2249cddf16f9817dc34608f5742c9b762d9a66be0e93e0b843d47f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07b1cdbe0a524d5194b02185a86fcff64ddd83a916c9e0dc0fa630e123f054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d16b1942494e44a2797d349c20548a3644b2a3b147c57563a8382a62de218b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b043f5c74e96225a8a07f281bd8080f032f00d7479687ea9446fb22b7432c0dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be
8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9325dbab1c8e61ea233d3bc5829fdab4f6d503d890e2f12e4599bcf5351ac5cd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4da2a9166e2c98274d3daff669efb74d5ad0366eaf3c24c7560814ad33de0096\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:24:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:28Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:28 crc kubenswrapper[4751]: I0227 16:26:28.662612 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"66626c61-3f6b-48d2-92e6-a061f0c0a2bb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-27T16:24:57Z\\\",\\\"message\\\":\\\"le observer\\\\nW0227 16:24:57.530693 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0227 16:24:57.530868 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0227 16:24:57.531942 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4127051073/tls.crt::/tmp/serving-cert-4127051073/tls.key\\\\\\\"\\\\nI0227 16:24:57.954150 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0227 16:24:57.958430 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0227 16:24:57.958455 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0227 16:24:57.958486 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0227 16:24:57.958493 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0227 16:24:57.964926 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0227 16:24:57.964950 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0227 16:24:57.964998 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0227 16:24:57.964958 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0227 16:24:57.965028 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0227 16:24:57.965032 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0227 16:24:57.965036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0227 16:24:57.965045 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0227 16:24:57.966125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:24:56Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":4,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:28Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:28 crc kubenswrapper[4751]: I0227 16:26:28.683882 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:28Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:28 crc kubenswrapper[4751]: I0227 16:26:28.709765 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zfn22" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fff69b03-aefa-4148-aa53-2d0f3501eafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae804072caefc03aa58e843a4d0ce899c98b2508b1088adecedd725536509a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3fdefe811670ca5091f6136e38cf3dcec56944843cdc805d90bc87652ac4e56a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47a5713f8444c8502bba5af923f5e76634948539b3f65f4dea613acef3aa53b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e0113cb79777ae7cd1005adbe963366d3a0baba00810f10cf019f78c02d4fad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4edffac90a1f821ce531172607c9735d56c4324e06cd33c8917c4d41557789d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b2803de20cf64d027d2751aeedf75d8c22083edd2e6d4f28bf38e8764d9db5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1db0ab79f102177fdb102bd9b679ba5a262764b9f49d20b28aa83bcddf8f71b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4mlg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zfn22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:28Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:28 crc kubenswrapper[4751]: I0227 16:26:28.742317 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a3f89b-11cb-4336-962d-c6835c5f758e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:26:24Z\\\",\\\"message\\\":\\\"chine-approver for network=default\\\\nI0227 16:26:24.478923 7232 services_controller.go:356] Processing sync for service openshift-image-registry/image-registry-operator for network=default\\\\nF0227 16:26:24.478917 7232 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:24Z is after 2025-08-24T17:21:41Z]\\\\nI0227 16:26:24.478937 7232 services_controller.go:360] Finished syncing service image-registry-operator on namespace openshift-image-registry for network=default : 15.59µs\\\\nI0227 16:26:24.478955 7232 services_controller.go:356] Processing sync for service openshift-kube-controller-manager/kube-controller-manager for ne\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:26:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vpxjd_openshift-ovn-kubernetes(45a3f89b-11cb-4336-962d-c6835c5f758e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nm8jw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vpxjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:28Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:28 crc kubenswrapper[4751]: I0227 16:26:28.762390 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44a8652c-fec7-4403-8f80-37bae0514e16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://950ef02aefca41bf13ee42609755d72a6d397cadd3ef8e6084b6a23093a0c0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a068c1db96852081b2960c70351b0efc4dd9d2d07bc621bcc8cc7cf52cca611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-25lx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dvbld\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:28Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:28 crc kubenswrapper[4751]: I0227 16:26:28.781387 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3059be4d-025f-48c7-8d37-edc542161c80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:24:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:23:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4372e903bbffcc7bae3515e3443a8c3b00a773bb51373ec2725cbae4bc309bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc4cc06a7fbfc76c4cc44e86e3a81212db634cf9727ae28b857150dc89f104e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6392f00602ed84ba23d03f7e082004ef85e622f340c36812094984606cfed32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:24:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://275d6054505cb9a29221cf933ffc2b436856047e0a74a8068dfad90cb1006a57\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://275d6054505cb9a29221cf933ffc2b436856047e0a74a8068dfad90cb1006a57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-27T16:23:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-27T16:23:59Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:23:58Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:28Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:28 crc kubenswrapper[4751]: I0227 16:26:28.802317 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d1db3ab9f8e96216969db4b19894e87d878d5a2cab072876e3f8c8f8c64955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba9d81a438d9b028d45b0c56552267b80cd7723bb8ab5420c152e31502ea2e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:28Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:28 crc kubenswrapper[4751]: I0227 16:26:28.832579 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:28Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:28 crc kubenswrapper[4751]: I0227 16:26:28.851603 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:28Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:28 crc kubenswrapper[4751]: I0227 16:26:28.875147 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4jc4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc07559e-a5c7-458c-b3ec-646981b798c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b78fbbbec971c56f84f987d09108ddcb21d1189a379396e1174678f4de8d0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-27T16:26:16Z\\\",\\\"message\\\":\\\"2026-02-27T16:25:30+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_fe1fcaa1-b815-44b0-8dcb-29abab8513ca\\\\n2026-02-27T16:25:30+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_fe1fcaa1-b815-44b0-8dcb-29abab8513ca to /host/opt/cni/bin/\\\\n2026-02-27T16:25:30Z [verbose] multus-daemon started\\\\n2026-02-27T16:25:30Z [verbose] Readiness Indicator file check\\\\n2026-02-27T16:26:15Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnxq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4jc4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:28Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:28 crc kubenswrapper[4751]: I0227 16:26:28.884830 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7da183a7-dcda-4e22-b135-b1ef0d593811\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with 
unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jp49\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4bnbv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:28Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:28 crc kubenswrapper[4751]: I0227 16:26:28.894195 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w9n9j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5219b077-c7f8-41e9-831b-9b7dae574b9f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b78054d3960ff2cfcbddb6d9f4a479124dc9a34934381eda3dc30f58c1520ff4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x8p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-27T16:25:28Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w9n9j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:28Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:28 crc kubenswrapper[4751]: I0227 16:26:28.905091 4751 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-27T16:25:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43ad2e71d838ce1b29bd8c06f6cef46e21bedaea9b317baca3bab6fd35e2041e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-27T16:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:28Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.495691 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.496573 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.496613 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.496638 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.496655 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:26:30Z","lastTransitionTime":"2026-02-27T16:26:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 27 16:26:30 crc kubenswrapper[4751]: E0227 16:26:30.516562 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.520088 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:30 crc kubenswrapper[4751]: E0227 16:26:30.520175 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.520283 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.520306 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.520426 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:30 crc kubenswrapper[4751]: E0227 16:26:30.520435 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:30 crc kubenswrapper[4751]: E0227 16:26:30.520495 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.520542 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.520598 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.520623 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:26:30 crc kubenswrapper[4751]: E0227 16:26:30.520638 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.520654 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.520710 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:26:30Z","lastTransitionTime":"2026-02-27T16:26:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:26:30 crc kubenswrapper[4751]: E0227 16:26:30.542017 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.545392 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.545450 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.545462 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.545476 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.545488 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:26:30Z","lastTransitionTime":"2026-02-27T16:26:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:26:30 crc kubenswrapper[4751]: E0227 16:26:30.563856 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.568898 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.568960 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.568985 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.569015 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.569035 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:26:30Z","lastTransitionTime":"2026-02-27T16:26:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:26:30 crc kubenswrapper[4751]: E0227 16:26:30.588849 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.593450 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.593499 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.593535 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.593556 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:26:30 crc kubenswrapper[4751]: I0227 16:26:30.593573 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:26:30Z","lastTransitionTime":"2026-02-27T16:26:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:26:30 crc kubenswrapper[4751]: E0227 16:26:30.613945 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-27T16:26:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0e60b9e9-7743-4d82-b22f-dfb39efa49a2\\\",\\\"systemUUID\\\":\\\"b400131a-a657-46e7-ab90-a8b42c88e909\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-27T16:26:30Z is after 2025-08-24T17:21:41Z" Feb 27 16:26:30 crc kubenswrapper[4751]: E0227 16:26:30.614181 4751 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 27 16:26:32 crc kubenswrapper[4751]: I0227 16:26:32.464984 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:26:32 crc kubenswrapper[4751]: E0227 16:26:32.465173 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:36.465142631 +0000 UTC m=+218.612157119 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:26:32 crc kubenswrapper[4751]: I0227 16:26:32.465268 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:32 crc kubenswrapper[4751]: I0227 16:26:32.465342 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:32 crc kubenswrapper[4751]: I0227 16:26:32.465456 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:32 crc kubenswrapper[4751]: E0227 16:26:32.465538 4751 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 27 16:26:32 crc kubenswrapper[4751]: E0227 16:26:32.465603 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 27 16:26:32 crc kubenswrapper[4751]: E0227 16:26:32.465665 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 27 16:26:32 crc kubenswrapper[4751]: E0227 16:26:32.465738 4751 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:26:32 crc 
kubenswrapper[4751]: E0227 16:26:32.465627 4751 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 27 16:26:32 crc kubenswrapper[4751]: E0227 16:26:32.465643 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-27 16:27:36.465619523 +0000 UTC m=+218.612634010 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 27 16:26:32 crc kubenswrapper[4751]: E0227 16:26:32.465852 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-27 16:27:36.465831599 +0000 UTC m=+218.612846086 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 27 16:26:32 crc kubenswrapper[4751]: E0227 16:26:32.465890 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-27 16:27:36.46587733 +0000 UTC m=+218.612891807 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:26:32 crc kubenswrapper[4751]: I0227 16:26:32.519838 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:32 crc kubenswrapper[4751]: I0227 16:26:32.519987 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:32 crc kubenswrapper[4751]: E0227 16:26:32.520039 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:32 crc kubenswrapper[4751]: I0227 16:26:32.520098 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:32 crc kubenswrapper[4751]: E0227 16:26:32.520247 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:32 crc kubenswrapper[4751]: E0227 16:26:32.520324 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:32 crc kubenswrapper[4751]: I0227 16:26:32.520383 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:32 crc kubenswrapper[4751]: E0227 16:26:32.520687 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:32 crc kubenswrapper[4751]: I0227 16:26:32.566579 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:32 crc kubenswrapper[4751]: I0227 16:26:32.566667 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs\") pod \"network-metrics-daemon-4bnbv\" (UID: \"7da183a7-dcda-4e22-b135-b1ef0d593811\") " pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:32 crc kubenswrapper[4751]: E0227 16:26:32.566796 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 27 16:26:32 crc kubenswrapper[4751]: E0227 16:26:32.566813 4751 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 27 16:26:32 crc kubenswrapper[4751]: E0227 16:26:32.566839 4751 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 27 16:26:32 crc kubenswrapper[4751]: E0227 16:26:32.566859 4751 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object 
"openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:26:32 crc kubenswrapper[4751]: E0227 16:26:32.566898 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs podName:7da183a7-dcda-4e22-b135-b1ef0d593811 nodeName:}" failed. No retries permitted until 2026-02-27 16:27:36.566876264 +0000 UTC m=+218.713890741 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs") pod "network-metrics-daemon-4bnbv" (UID: "7da183a7-dcda-4e22-b135-b1ef0d593811") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 27 16:26:32 crc kubenswrapper[4751]: E0227 16:26:32.566934 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-27 16:27:36.566910225 +0000 UTC m=+218.713924712 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 27 16:26:33 crc kubenswrapper[4751]: I0227 16:26:33.534791 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Feb 27 16:26:33 crc kubenswrapper[4751]: E0227 16:26:33.634786 4751 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 27 16:26:34 crc kubenswrapper[4751]: I0227 16:26:34.520078 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:34 crc kubenswrapper[4751]: I0227 16:26:34.520168 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:34 crc kubenswrapper[4751]: I0227 16:26:34.520234 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:34 crc kubenswrapper[4751]: I0227 16:26:34.520108 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:34 crc kubenswrapper[4751]: E0227 16:26:34.520262 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:34 crc kubenswrapper[4751]: E0227 16:26:34.520383 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:34 crc kubenswrapper[4751]: E0227 16:26:34.520566 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:34 crc kubenswrapper[4751]: E0227 16:26:34.520664 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:36 crc kubenswrapper[4751]: I0227 16:26:36.520536 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:36 crc kubenswrapper[4751]: I0227 16:26:36.520591 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:36 crc kubenswrapper[4751]: E0227 16:26:36.520702 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:36 crc kubenswrapper[4751]: I0227 16:26:36.520771 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:36 crc kubenswrapper[4751]: I0227 16:26:36.520779 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:36 crc kubenswrapper[4751]: E0227 16:26:36.520889 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:36 crc kubenswrapper[4751]: E0227 16:26:36.521086 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:36 crc kubenswrapper[4751]: E0227 16:26:36.521158 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:38 crc kubenswrapper[4751]: I0227 16:26:38.520334 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:38 crc kubenswrapper[4751]: E0227 16:26:38.520483 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:38 crc kubenswrapper[4751]: I0227 16:26:38.520625 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:38 crc kubenswrapper[4751]: I0227 16:26:38.520692 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:38 crc kubenswrapper[4751]: E0227 16:26:38.520765 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:38 crc kubenswrapper[4751]: I0227 16:26:38.520799 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:38 crc kubenswrapper[4751]: E0227 16:26:38.520985 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:38 crc kubenswrapper[4751]: E0227 16:26:38.521106 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:38 crc kubenswrapper[4751]: I0227 16:26:38.559170 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dvbld" podStartSLOduration=116.55914634 podStartE2EDuration="1m56.55914634s" podCreationTimestamp="2026-02-27 16:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:26:38.545666782 +0000 UTC m=+160.692681229" watchObservedRunningTime="2026-02-27 16:26:38.55914634 +0000 UTC m=+160.706160787" Feb 27 16:26:38 crc kubenswrapper[4751]: I0227 16:26:38.559369 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=41.559363336 podStartE2EDuration="41.559363336s" podCreationTimestamp="2026-02-27 16:25:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:26:38.558019672 +0000 UTC m=+160.705034119" watchObservedRunningTime="2026-02-27 16:26:38.559363336 +0000 UTC m=+160.706377783" Feb 27 16:26:38 crc kubenswrapper[4751]: I0227 16:26:38.603540 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=70.603505423 podStartE2EDuration="1m10.603505423s" podCreationTimestamp="2026-02-27 16:25:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:26:38.582996839 +0000 UTC m=+160.730011316" watchObservedRunningTime="2026-02-27 16:26:38.603505423 +0000 UTC m=+160.750519870" Feb 27 16:26:38 crc kubenswrapper[4751]: I0227 16:26:38.629390 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-zfn22" podStartSLOduration=117.629372822 podStartE2EDuration="1m57.629372822s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:26:38.626005108 +0000 UTC m=+160.773019555" watchObservedRunningTime="2026-02-27 16:26:38.629372822 +0000 UTC m=+160.776387269" Feb 27 16:26:38 crc kubenswrapper[4751]: E0227 16:26:38.635207 4751 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Feb 27 16:26:38 crc kubenswrapper[4751]: I0227 16:26:38.687526 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-w9n9j" podStartSLOduration=117.687506431 podStartE2EDuration="1m57.687506431s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:26:38.687223143 +0000 UTC m=+160.834237590" watchObservedRunningTime="2026-02-27 16:26:38.687506431 +0000 UTC m=+160.834520878" Feb 27 16:26:38 crc kubenswrapper[4751]: I0227 16:26:38.755841 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-4jc4n" podStartSLOduration=117.755825845 podStartE2EDuration="1m57.755825845s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:26:38.755264601 +0000 UTC m=+160.902279048" watchObservedRunningTime="2026-02-27 16:26:38.755825845 +0000 UTC m=+160.902840292" Feb 27 16:26:38 crc kubenswrapper[4751]: I0227 16:26:38.775322 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=31.775302363 podStartE2EDuration="31.775302363s" podCreationTimestamp="2026-02-27 16:26:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:26:38.773153559 +0000 UTC m=+160.920168016" watchObservedRunningTime="2026-02-27 16:26:38.775302363 +0000 UTC m=+160.922316810" Feb 27 16:26:38 crc kubenswrapper[4751]: I0227 16:26:38.786435 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=5.786396802 podStartE2EDuration="5.786396802s" podCreationTimestamp="2026-02-27 16:26:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:26:38.785733525 +0000 UTC m=+160.932747972" watchObservedRunningTime="2026-02-27 16:26:38.786396802 +0000 UTC m=+160.933411249" Feb 27 16:26:38 crc kubenswrapper[4751]: I0227 16:26:38.821803 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=51.821784739 podStartE2EDuration="51.821784739s" podCreationTimestamp="2026-02-27 16:25:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:26:38.821445631 +0000 UTC m=+160.968460088" watchObservedRunningTime="2026-02-27 16:26:38.821784739 +0000 UTC m=+160.968799186" Feb 27 16:26:38 crc kubenswrapper[4751]: I0227 16:26:38.831928 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-9c6p9" podStartSLOduration=117.831906233 podStartE2EDuration="1m57.831906233s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:26:38.831425331 +0000 UTC m=+160.978439778" watchObservedRunningTime="2026-02-27 16:26:38.831906233 +0000 UTC m=+160.978920680" Feb 27 16:26:38 crc kubenswrapper[4751]: I0227 16:26:38.845340 4751 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podStartSLOduration=117.84530408 podStartE2EDuration="1m57.84530408s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:26:38.844759176 +0000 UTC m=+160.991773633" watchObservedRunningTime="2026-02-27 16:26:38.84530408 +0000 UTC m=+160.992318587" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.520211 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.520323 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.520821 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:40 crc kubenswrapper[4751]: E0227 16:26:40.521000 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.521390 4751 scope.go:117] "RemoveContainer" containerID="d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e" Feb 27 16:26:40 crc kubenswrapper[4751]: E0227 16:26:40.521669 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-vpxjd_openshift-ovn-kubernetes(45a3f89b-11cb-4336-962d-c6835c5f758e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.521784 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:40 crc kubenswrapper[4751]: E0227 16:26:40.521894 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:40 crc kubenswrapper[4751]: E0227 16:26:40.522010 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:40 crc kubenswrapper[4751]: E0227 16:26:40.522118 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.672112 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.672184 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.672256 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.672285 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.672303 4751 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-27T16:26:40Z","lastTransitionTime":"2026-02-27T16:26:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.737840 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-8m6wf"] Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.738570 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8m6wf" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.740464 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.741496 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.741780 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.742026 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.833449 4751 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.843078 4751 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.850960 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/f1e25376-8244-4b5c-82fe-ba6deccbad1a-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-8m6wf\" (UID: \"f1e25376-8244-4b5c-82fe-ba6deccbad1a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8m6wf" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.851010 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/f1e25376-8244-4b5c-82fe-ba6deccbad1a-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-8m6wf\" (UID: \"f1e25376-8244-4b5c-82fe-ba6deccbad1a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8m6wf" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.851064 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f1e25376-8244-4b5c-82fe-ba6deccbad1a-service-ca\") pod \"cluster-version-operator-5c965bbfc6-8m6wf\" (UID: \"f1e25376-8244-4b5c-82fe-ba6deccbad1a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8m6wf" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.851088 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f1e25376-8244-4b5c-82fe-ba6deccbad1a-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-8m6wf\" (UID: \"f1e25376-8244-4b5c-82fe-ba6deccbad1a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8m6wf" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.851159 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f1e25376-8244-4b5c-82fe-ba6deccbad1a-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-8m6wf\" (UID: \"f1e25376-8244-4b5c-82fe-ba6deccbad1a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8m6wf" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.952114 4751 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f1e25376-8244-4b5c-82fe-ba6deccbad1a-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-8m6wf\" (UID: \"f1e25376-8244-4b5c-82fe-ba6deccbad1a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8m6wf" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.952320 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/f1e25376-8244-4b5c-82fe-ba6deccbad1a-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-8m6wf\" (UID: \"f1e25376-8244-4b5c-82fe-ba6deccbad1a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8m6wf" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.952373 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/f1e25376-8244-4b5c-82fe-ba6deccbad1a-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-8m6wf\" (UID: \"f1e25376-8244-4b5c-82fe-ba6deccbad1a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8m6wf" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.952566 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f1e25376-8244-4b5c-82fe-ba6deccbad1a-service-ca\") pod \"cluster-version-operator-5c965bbfc6-8m6wf\" (UID: \"f1e25376-8244-4b5c-82fe-ba6deccbad1a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8m6wf" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.952626 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f1e25376-8244-4b5c-82fe-ba6deccbad1a-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-8m6wf\" (UID: \"f1e25376-8244-4b5c-82fe-ba6deccbad1a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8m6wf" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.952650 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/f1e25376-8244-4b5c-82fe-ba6deccbad1a-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-8m6wf\" (UID: \"f1e25376-8244-4b5c-82fe-ba6deccbad1a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8m6wf" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.952698 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/f1e25376-8244-4b5c-82fe-ba6deccbad1a-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-8m6wf\" (UID: \"f1e25376-8244-4b5c-82fe-ba6deccbad1a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8m6wf" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.954302 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f1e25376-8244-4b5c-82fe-ba6deccbad1a-service-ca\") pod \"cluster-version-operator-5c965bbfc6-8m6wf\" (UID: \"f1e25376-8244-4b5c-82fe-ba6deccbad1a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8m6wf" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.962277 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/f1e25376-8244-4b5c-82fe-ba6deccbad1a-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-8m6wf\" (UID: \"f1e25376-8244-4b5c-82fe-ba6deccbad1a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8m6wf" Feb 27 16:26:40 crc kubenswrapper[4751]: I0227 16:26:40.989442 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f1e25376-8244-4b5c-82fe-ba6deccbad1a-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-8m6wf\" (UID: \"f1e25376-8244-4b5c-82fe-ba6deccbad1a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8m6wf" Feb 27 16:26:41 crc kubenswrapper[4751]: I0227 16:26:41.053014 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8m6wf" Feb 27 16:26:41 crc kubenswrapper[4751]: W0227 16:26:41.074194 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf1e25376_8244_4b5c_82fe_ba6deccbad1a.slice/crio-d17183ecf48d96b75e2623ffa207fb8fc052b15795bef6c2ca8745728b224d4f WatchSource:0}: Error finding container d17183ecf48d96b75e2623ffa207fb8fc052b15795bef6c2ca8745728b224d4f: Status 404 returned error can't find the container with id d17183ecf48d96b75e2623ffa207fb8fc052b15795bef6c2ca8745728b224d4f Feb 27 16:26:41 crc kubenswrapper[4751]: I0227 16:26:41.366377 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8m6wf" event={"ID":"f1e25376-8244-4b5c-82fe-ba6deccbad1a","Type":"ContainerStarted","Data":"63dcfbcdb44416d0b623923e83d3808af20966a1cfbce4bac462dacba2009a83"} Feb 27 16:26:41 crc kubenswrapper[4751]: I0227 16:26:41.366478 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8m6wf" event={"ID":"f1e25376-8244-4b5c-82fe-ba6deccbad1a","Type":"ContainerStarted","Data":"d17183ecf48d96b75e2623ffa207fb8fc052b15795bef6c2ca8745728b224d4f"} Feb 27 16:26:41 crc kubenswrapper[4751]: I0227 16:26:41.387974 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8m6wf" podStartSLOduration=120.387937562 podStartE2EDuration="2m0.387937562s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:26:41.385656125 +0000 UTC m=+163.532670592" watchObservedRunningTime="2026-02-27 16:26:41.387937562 +0000 UTC m=+163.534952099" Feb 27 16:26:42 crc kubenswrapper[4751]: I0227 16:26:42.520647 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:42 crc kubenswrapper[4751]: I0227 16:26:42.520686 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:42 crc kubenswrapper[4751]: I0227 16:26:42.520647 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:42 crc kubenswrapper[4751]: I0227 16:26:42.520770 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:42 crc kubenswrapper[4751]: E0227 16:26:42.520790 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:42 crc kubenswrapper[4751]: E0227 16:26:42.520861 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:42 crc kubenswrapper[4751]: E0227 16:26:42.520970 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:42 crc kubenswrapper[4751]: E0227 16:26:42.521244 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:43 crc kubenswrapper[4751]: E0227 16:26:43.637016 4751 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 27 16:26:44 crc kubenswrapper[4751]: I0227 16:26:44.519787 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:44 crc kubenswrapper[4751]: I0227 16:26:44.519832 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:44 crc kubenswrapper[4751]: I0227 16:26:44.519904 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:44 crc kubenswrapper[4751]: I0227 16:26:44.519959 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:44 crc kubenswrapper[4751]: E0227 16:26:44.519913 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:44 crc kubenswrapper[4751]: E0227 16:26:44.520110 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:44 crc kubenswrapper[4751]: E0227 16:26:44.520183 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:44 crc kubenswrapper[4751]: E0227 16:26:44.520246 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:46 crc kubenswrapper[4751]: I0227 16:26:46.520243 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:46 crc kubenswrapper[4751]: I0227 16:26:46.520350 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:46 crc kubenswrapper[4751]: E0227 16:26:46.520785 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:46 crc kubenswrapper[4751]: I0227 16:26:46.520469 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:46 crc kubenswrapper[4751]: E0227 16:26:46.520833 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:46 crc kubenswrapper[4751]: I0227 16:26:46.520390 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:46 crc kubenswrapper[4751]: E0227 16:26:46.520907 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:46 crc kubenswrapper[4751]: E0227 16:26:46.520990 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:48 crc kubenswrapper[4751]: I0227 16:26:48.520824 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:48 crc kubenswrapper[4751]: E0227 16:26:48.521545 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:48 crc kubenswrapper[4751]: I0227 16:26:48.521629 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:48 crc kubenswrapper[4751]: I0227 16:26:48.521614 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:48 crc kubenswrapper[4751]: I0227 16:26:48.521667 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:48 crc kubenswrapper[4751]: E0227 16:26:48.522314 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:48 crc kubenswrapper[4751]: E0227 16:26:48.522630 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:48 crc kubenswrapper[4751]: E0227 16:26:48.522770 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:48 crc kubenswrapper[4751]: E0227 16:26:48.638208 4751 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 27 16:26:50 crc kubenswrapper[4751]: I0227 16:26:50.520234 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:50 crc kubenswrapper[4751]: I0227 16:26:50.520309 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:50 crc kubenswrapper[4751]: E0227 16:26:50.520499 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:50 crc kubenswrapper[4751]: I0227 16:26:50.520593 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:50 crc kubenswrapper[4751]: E0227 16:26:50.520741 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:50 crc kubenswrapper[4751]: E0227 16:26:50.520872 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:50 crc kubenswrapper[4751]: I0227 16:26:50.520953 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:50 crc kubenswrapper[4751]: E0227 16:26:50.521019 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:51 crc kubenswrapper[4751]: I0227 16:26:51.521316 4751 scope.go:117] "RemoveContainer" containerID="d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e" Feb 27 16:26:51 crc kubenswrapper[4751]: E0227 16:26:51.521653 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-vpxjd_openshift-ovn-kubernetes(45a3f89b-11cb-4336-962d-c6835c5f758e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" Feb 27 16:26:52 crc kubenswrapper[4751]: I0227 16:26:52.520387 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:52 crc kubenswrapper[4751]: I0227 16:26:52.520485 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:52 crc kubenswrapper[4751]: I0227 16:26:52.520593 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:52 crc kubenswrapper[4751]: I0227 16:26:52.520387 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:52 crc kubenswrapper[4751]: E0227 16:26:52.520715 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:52 crc kubenswrapper[4751]: E0227 16:26:52.520829 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:52 crc kubenswrapper[4751]: E0227 16:26:52.520997 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:52 crc kubenswrapper[4751]: E0227 16:26:52.521237 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:53 crc kubenswrapper[4751]: E0227 16:26:53.640212 4751 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 27 16:26:54 crc kubenswrapper[4751]: I0227 16:26:54.519692 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:54 crc kubenswrapper[4751]: E0227 16:26:54.519857 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:54 crc kubenswrapper[4751]: I0227 16:26:54.520151 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:54 crc kubenswrapper[4751]: E0227 16:26:54.520245 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:54 crc kubenswrapper[4751]: I0227 16:26:54.520377 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:54 crc kubenswrapper[4751]: I0227 16:26:54.520477 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:54 crc kubenswrapper[4751]: E0227 16:26:54.520613 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:54 crc kubenswrapper[4751]: E0227 16:26:54.520760 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:56 crc kubenswrapper[4751]: I0227 16:26:56.520538 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:56 crc kubenswrapper[4751]: I0227 16:26:56.520652 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:56 crc kubenswrapper[4751]: I0227 16:26:56.520689 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:56 crc kubenswrapper[4751]: I0227 16:26:56.520779 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:56 crc kubenswrapper[4751]: E0227 16:26:56.521022 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:56 crc kubenswrapper[4751]: E0227 16:26:56.521117 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:56 crc kubenswrapper[4751]: E0227 16:26:56.521349 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:56 crc kubenswrapper[4751]: E0227 16:26:56.521848 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:58 crc kubenswrapper[4751]: I0227 16:26:58.519904 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:26:58 crc kubenswrapper[4751]: I0227 16:26:58.519949 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:26:58 crc kubenswrapper[4751]: I0227 16:26:58.519982 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:26:58 crc kubenswrapper[4751]: I0227 16:26:58.520070 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:26:58 crc kubenswrapper[4751]: E0227 16:26:58.521156 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:26:58 crc kubenswrapper[4751]: E0227 16:26:58.521298 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:26:58 crc kubenswrapper[4751]: E0227 16:26:58.521574 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:26:58 crc kubenswrapper[4751]: E0227 16:26:58.521679 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:26:58 crc kubenswrapper[4751]: E0227 16:26:58.640875 4751 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 27 16:27:00 crc kubenswrapper[4751]: I0227 16:27:00.519979 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:27:00 crc kubenswrapper[4751]: I0227 16:27:00.520070 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:27:00 crc kubenswrapper[4751]: I0227 16:27:00.520183 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:27:00 crc kubenswrapper[4751]: E0227 16:27:00.520170 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:27:00 crc kubenswrapper[4751]: I0227 16:27:00.520260 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:27:00 crc kubenswrapper[4751]: E0227 16:27:00.520369 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:27:00 crc kubenswrapper[4751]: E0227 16:27:00.520585 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:27:00 crc kubenswrapper[4751]: E0227 16:27:00.520748 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:27:02 crc kubenswrapper[4751]: I0227 16:27:02.449451 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4jc4n_dc07559e-a5c7-458c-b3ec-646981b798c1/kube-multus/1.log" Feb 27 16:27:02 crc kubenswrapper[4751]: I0227 16:27:02.450248 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4jc4n_dc07559e-a5c7-458c-b3ec-646981b798c1/kube-multus/0.log" Feb 27 16:27:02 crc kubenswrapper[4751]: I0227 16:27:02.450333 4751 generic.go:334] "Generic (PLEG): container finished" podID="dc07559e-a5c7-458c-b3ec-646981b798c1" containerID="1b78fbbbec971c56f84f987d09108ddcb21d1189a379396e1174678f4de8d0e6" exitCode=1 Feb 27 16:27:02 crc kubenswrapper[4751]: I0227 16:27:02.450385 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4jc4n" event={"ID":"dc07559e-a5c7-458c-b3ec-646981b798c1","Type":"ContainerDied","Data":"1b78fbbbec971c56f84f987d09108ddcb21d1189a379396e1174678f4de8d0e6"} Feb 27 16:27:02 crc kubenswrapper[4751]: I0227 16:27:02.450479 4751 scope.go:117] "RemoveContainer" containerID="e6a8cdc3937070c0644b8e2ef336b4b2bf132206c5f224b050910368b4de6b96" Feb 27 16:27:02 crc kubenswrapper[4751]: I0227 16:27:02.451321 4751 scope.go:117] "RemoveContainer" containerID="1b78fbbbec971c56f84f987d09108ddcb21d1189a379396e1174678f4de8d0e6" Feb 27 16:27:02 crc kubenswrapper[4751]: E0227 16:27:02.451717 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-4jc4n_openshift-multus(dc07559e-a5c7-458c-b3ec-646981b798c1)\"" pod="openshift-multus/multus-4jc4n" podUID="dc07559e-a5c7-458c-b3ec-646981b798c1" Feb 27 16:27:02 crc kubenswrapper[4751]: I0227 16:27:02.520591 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:27:02 crc kubenswrapper[4751]: I0227 16:27:02.520686 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:27:02 crc kubenswrapper[4751]: I0227 16:27:02.520622 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:27:02 crc kubenswrapper[4751]: E0227 16:27:02.520836 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:27:02 crc kubenswrapper[4751]: I0227 16:27:02.520882 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:27:02 crc kubenswrapper[4751]: E0227 16:27:02.521133 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:27:02 crc kubenswrapper[4751]: E0227 16:27:02.521263 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:27:02 crc kubenswrapper[4751]: E0227 16:27:02.521425 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:27:03 crc kubenswrapper[4751]: I0227 16:27:03.455310 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4jc4n_dc07559e-a5c7-458c-b3ec-646981b798c1/kube-multus/1.log" Feb 27 16:27:03 crc kubenswrapper[4751]: I0227 16:27:03.521733 4751 scope.go:117] "RemoveContainer" containerID="d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e" Feb 27 16:27:03 crc kubenswrapper[4751]: E0227 16:27:03.522001 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-vpxjd_openshift-ovn-kubernetes(45a3f89b-11cb-4336-962d-c6835c5f758e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" Feb 27 16:27:03 crc kubenswrapper[4751]: E0227 16:27:03.642047 4751 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 27 16:27:04 crc kubenswrapper[4751]: I0227 16:27:04.520084 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:27:04 crc kubenswrapper[4751]: I0227 16:27:04.520158 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:27:04 crc kubenswrapper[4751]: I0227 16:27:04.520251 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:27:04 crc kubenswrapper[4751]: E0227 16:27:04.520457 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:27:04 crc kubenswrapper[4751]: I0227 16:27:04.520516 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:27:04 crc kubenswrapper[4751]: E0227 16:27:04.520698 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:27:04 crc kubenswrapper[4751]: E0227 16:27:04.520848 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:27:04 crc kubenswrapper[4751]: E0227 16:27:04.520969 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:27:06 crc kubenswrapper[4751]: I0227 16:27:06.520231 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:27:06 crc kubenswrapper[4751]: I0227 16:27:06.520247 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:27:06 crc kubenswrapper[4751]: I0227 16:27:06.520451 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:27:06 crc kubenswrapper[4751]: E0227 16:27:06.520543 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:27:06 crc kubenswrapper[4751]: E0227 16:27:06.520677 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:27:06 crc kubenswrapper[4751]: I0227 16:27:06.520737 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:27:06 crc kubenswrapper[4751]: E0227 16:27:06.520791 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:27:06 crc kubenswrapper[4751]: E0227 16:27:06.520936 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:27:08 crc kubenswrapper[4751]: I0227 16:27:08.520690 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:27:08 crc kubenswrapper[4751]: I0227 16:27:08.520761 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:27:08 crc kubenswrapper[4751]: I0227 16:27:08.520804 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:27:08 crc kubenswrapper[4751]: I0227 16:27:08.520793 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:27:08 crc kubenswrapper[4751]: E0227 16:27:08.522148 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:27:08 crc kubenswrapper[4751]: E0227 16:27:08.522259 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:27:08 crc kubenswrapper[4751]: E0227 16:27:08.522428 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:27:08 crc kubenswrapper[4751]: E0227 16:27:08.522604 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:27:08 crc kubenswrapper[4751]: E0227 16:27:08.642992 4751 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 27 16:27:10 crc kubenswrapper[4751]: I0227 16:27:10.520048 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:27:10 crc kubenswrapper[4751]: I0227 16:27:10.520099 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:27:10 crc kubenswrapper[4751]: I0227 16:27:10.520228 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:27:10 crc kubenswrapper[4751]: E0227 16:27:10.520440 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:27:10 crc kubenswrapper[4751]: E0227 16:27:10.520600 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:27:10 crc kubenswrapper[4751]: E0227 16:27:10.520744 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:27:10 crc kubenswrapper[4751]: I0227 16:27:10.521364 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:27:10 crc kubenswrapper[4751]: E0227 16:27:10.521668 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:27:12 crc kubenswrapper[4751]: I0227 16:27:12.520527 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:27:12 crc kubenswrapper[4751]: I0227 16:27:12.520591 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:27:12 crc kubenswrapper[4751]: E0227 16:27:12.520696 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:27:12 crc kubenswrapper[4751]: I0227 16:27:12.520949 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:27:12 crc kubenswrapper[4751]: E0227 16:27:12.521033 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:27:12 crc kubenswrapper[4751]: I0227 16:27:12.521144 4751 scope.go:117] "RemoveContainer" containerID="1b78fbbbec971c56f84f987d09108ddcb21d1189a379396e1174678f4de8d0e6" Feb 27 16:27:12 crc kubenswrapper[4751]: I0227 16:27:12.521620 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:27:12 crc kubenswrapper[4751]: E0227 16:27:12.521719 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:27:12 crc kubenswrapper[4751]: E0227 16:27:12.522082 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:27:13 crc kubenswrapper[4751]: I0227 16:27:13.492681 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4jc4n_dc07559e-a5c7-458c-b3ec-646981b798c1/kube-multus/1.log" Feb 27 16:27:13 crc kubenswrapper[4751]: I0227 16:27:13.492757 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4jc4n" event={"ID":"dc07559e-a5c7-458c-b3ec-646981b798c1","Type":"ContainerStarted","Data":"078030dbb5992de0c1bc4d1619c873a920f892cb1faa8ac404dbc20bb29ea6b8"} Feb 27 16:27:13 crc kubenswrapper[4751]: E0227 16:27:13.644424 4751 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 27 16:27:14 crc kubenswrapper[4751]: I0227 16:27:14.520606 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:27:14 crc kubenswrapper[4751]: I0227 16:27:14.520719 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:27:14 crc kubenswrapper[4751]: I0227 16:27:14.520849 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:27:14 crc kubenswrapper[4751]: I0227 16:27:14.520876 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:27:14 crc kubenswrapper[4751]: E0227 16:27:14.521227 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:27:14 crc kubenswrapper[4751]: E0227 16:27:14.521327 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:27:14 crc kubenswrapper[4751]: E0227 16:27:14.521383 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:27:14 crc kubenswrapper[4751]: E0227 16:27:14.521459 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:27:16 crc kubenswrapper[4751]: I0227 16:27:16.520241 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:27:16 crc kubenswrapper[4751]: E0227 16:27:16.520568 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:27:16 crc kubenswrapper[4751]: I0227 16:27:16.520667 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:27:16 crc kubenswrapper[4751]: E0227 16:27:16.520792 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:27:16 crc kubenswrapper[4751]: I0227 16:27:16.520836 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:27:16 crc kubenswrapper[4751]: I0227 16:27:16.520852 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:27:16 crc kubenswrapper[4751]: E0227 16:27:16.520878 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:27:16 crc kubenswrapper[4751]: E0227 16:27:16.520925 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:27:18 crc kubenswrapper[4751]: I0227 16:27:18.519822 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:27:18 crc kubenswrapper[4751]: I0227 16:27:18.521928 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:27:18 crc kubenswrapper[4751]: I0227 16:27:18.521921 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:27:18 crc kubenswrapper[4751]: I0227 16:27:18.521984 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:27:18 crc kubenswrapper[4751]: E0227 16:27:18.522102 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:27:18 crc kubenswrapper[4751]: E0227 16:27:18.522251 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:27:18 crc kubenswrapper[4751]: E0227 16:27:18.522362 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:27:18 crc kubenswrapper[4751]: E0227 16:27:18.522459 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:27:18 crc kubenswrapper[4751]: I0227 16:27:18.523750 4751 scope.go:117] "RemoveContainer" containerID="d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e" Feb 27 16:27:18 crc kubenswrapper[4751]: E0227 16:27:18.645040 4751 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 27 16:27:19 crc kubenswrapper[4751]: I0227 16:27:19.517485 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vpxjd_45a3f89b-11cb-4336-962d-c6835c5f758e/ovnkube-controller/3.log" Feb 27 16:27:19 crc kubenswrapper[4751]: I0227 16:27:19.520714 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerStarted","Data":"60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f"} Feb 27 16:27:19 crc kubenswrapper[4751]: I0227 16:27:19.521271 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:27:19 crc kubenswrapper[4751]: I0227 16:27:19.562215 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" podStartSLOduration=158.562197277 podStartE2EDuration="2m38.562197277s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:19.561077229 +0000 UTC m=+201.708091676" watchObservedRunningTime="2026-02-27 16:27:19.562197277 +0000 UTC m=+201.709211724" Feb 27 16:27:19 crc kubenswrapper[4751]: I0227 16:27:19.927277 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-4bnbv"] Feb 27 16:27:19 crc kubenswrapper[4751]: I0227 16:27:19.927418 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:27:19 crc kubenswrapper[4751]: E0227 16:27:19.927505 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:27:20 crc kubenswrapper[4751]: I0227 16:27:20.520696 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:27:20 crc kubenswrapper[4751]: I0227 16:27:20.520744 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:27:20 crc kubenswrapper[4751]: E0227 16:27:20.521248 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:27:20 crc kubenswrapper[4751]: I0227 16:27:20.520849 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:27:20 crc kubenswrapper[4751]: E0227 16:27:20.521330 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:27:20 crc kubenswrapper[4751]: E0227 16:27:20.521444 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:27:21 crc kubenswrapper[4751]: I0227 16:27:21.520517 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:27:21 crc kubenswrapper[4751]: E0227 16:27:21.520756 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:27:22 crc kubenswrapper[4751]: I0227 16:27:22.519629 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:27:22 crc kubenswrapper[4751]: I0227 16:27:22.519675 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:27:22 crc kubenswrapper[4751]: E0227 16:27:22.519765 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 27 16:27:22 crc kubenswrapper[4751]: I0227 16:27:22.519792 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:27:22 crc kubenswrapper[4751]: E0227 16:27:22.519897 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 27 16:27:22 crc kubenswrapper[4751]: E0227 16:27:22.520063 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 27 16:27:23 crc kubenswrapper[4751]: I0227 16:27:23.520549 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:27:23 crc kubenswrapper[4751]: E0227 16:27:23.520780 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4bnbv" podUID="7da183a7-dcda-4e22-b135-b1ef0d593811" Feb 27 16:27:24 crc kubenswrapper[4751]: I0227 16:27:24.520019 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:27:24 crc kubenswrapper[4751]: I0227 16:27:24.520169 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:27:24 crc kubenswrapper[4751]: I0227 16:27:24.520285 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:27:24 crc kubenswrapper[4751]: I0227 16:27:24.523712 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Feb 27 16:27:24 crc kubenswrapper[4751]: I0227 16:27:24.523778 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Feb 27 16:27:24 crc kubenswrapper[4751]: I0227 16:27:24.524688 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Feb 27 16:27:24 crc kubenswrapper[4751]: I0227 16:27:24.525477 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Feb 27 16:27:25 crc kubenswrapper[4751]: I0227 16:27:25.520139 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:27:25 crc kubenswrapper[4751]: I0227 16:27:25.524257 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Feb 27 16:27:25 crc kubenswrapper[4751]: I0227 16:27:25.524364 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Feb 27 16:27:28 crc kubenswrapper[4751]: I0227 16:27:28.926540 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.515324 4751 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.551104 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.551545 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.559054 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-htn5q"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.559572 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.560075 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.562516 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.565875 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.566557 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.566710 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.566815 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.566838 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-knf9q"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.572110 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-7hctb"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.572338 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.572509 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-7hctb" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.572588 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.572833 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.573378 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.573697 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.574256 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.574760 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-dvwqp"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.575297 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.576299 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-wwdf6"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.576932 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjwh6"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.577376 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjwh6" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.578074 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.578327 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.578644 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.579130 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.579240 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.576941 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wwdf6" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.581318 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.581566 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.581819 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.582183 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.582615 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.583147 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.587913 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.588322 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.588680 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.589341 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.590316 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.590718 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.590905 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.591234 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.591339 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.591464 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.591568 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.591712 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.591811 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Feb 27 16:27:31 crc 
kubenswrapper[4751]: I0227 16:27:31.591899 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.592153 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-5f8sr"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.592646 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-5f8sr" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.593553 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-7gs6x"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.594336 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-7gs6x" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.595972 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gjlwd"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.596070 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.596488 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.596628 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gjlwd" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.596683 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.596881 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-hb87p"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.597582 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.597744 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.597964 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.598527 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5svll"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.599028 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5svll" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.599378 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-fqng2"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.599875 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.600921 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.602993 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-zjprp"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.603346 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-8fhbk"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.603472 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zjprp" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.603508 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-fqng2" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.604493 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-hsqjr"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.604976 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-sblbf"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.605460 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zjr9n"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.606039 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-8fhbk" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.606250 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.606412 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.606441 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.606613 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-sblbf" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.606626 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-hsqjr" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.607477 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.612381 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-7hctb"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.616657 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.616929 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.620066 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.632673 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6490b89d-117d-4d8c-b625-b02d0404c882-encryption-config\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.632715 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrdnl\" (UniqueName: \"kubernetes.io/projected/6490b89d-117d-4d8c-b625-b02d0404c882-kube-api-access-rrdnl\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.632747 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6490b89d-117d-4d8c-b625-b02d0404c882-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.632800 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6490b89d-117d-4d8c-b625-b02d0404c882-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.632819 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6490b89d-117d-4d8c-b625-b02d0404c882-audit-dir\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.632868 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6490b89d-117d-4d8c-b625-b02d0404c882-audit-policies\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc 
kubenswrapper[4751]: I0227 16:27:31.632888 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6490b89d-117d-4d8c-b625-b02d0404c882-etcd-client\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.632910 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6490b89d-117d-4d8c-b625-b02d0404c882-serving-cert\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.633080 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.635849 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.635929 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.636143 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.636206 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.636744 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.636813 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.636980 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.637162 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.637208 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.637350 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.638308 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-2c2zk"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.639999 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2c2zk" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.642217 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.643964 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.644019 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.644201 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.644416 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.644694 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.644760 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.645036 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.645072 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.645218 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.645385 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.665247 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.665550 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.665981 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.666274 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.666671 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.668673 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.668867 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.668867 4751 reflector.go:368] 
Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.669028 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.669159 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.669529 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.669533 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.669705 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.669735 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.670049 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.670072 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.670451 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.670861 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.672726 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.686040 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.686447 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.687200 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.687235 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-htn5q"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.689031 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.690791 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-txmh7"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.691290 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjwh6"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.691470 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-txmh7" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.691911 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.692166 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.695382 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.698089 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.699921 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.700061 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.704628 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gjlwd"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.706827 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-knf9q"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.707600 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.708334 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-blvwj"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.708736 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.708842 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-blvwj" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.709644 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.713384 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.713419 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.714066 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.717650 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.717989 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.718787 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.720471 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6r57j"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.721028 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6r57j" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.721352 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-r87pw"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.722186 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-r87pw" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.722316 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bfl2l"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.725894 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.726443 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bfl2l" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.729042 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vrmbr"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.730268 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vrmbr" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.730970 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536826-mxq7k"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.731698 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536826-mxq7k" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.732063 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h46g5"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.733224 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.734536 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6490b89d-117d-4d8c-b625-b02d0404c882-audit-policies\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.734570 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6490b89d-117d-4d8c-b625-b02d0404c882-etcd-client\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.734600 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0e28f47c-623d-4724-bcec-80d38d902eba-metrics-tls\") pod \"dns-operator-744455d44c-fqng2\" (UID: \"0e28f47c-623d-4724-bcec-80d38d902eba\") " pod="openshift-dns-operator/dns-operator-744455d44c-fqng2" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.734618 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n6lpg\" (UniqueName: \"kubernetes.io/projected/cd1d7d7d-297e-4d67-94c2-746c0295105f-kube-api-access-n6lpg\") pod \"ingress-operator-5b745b69d9-zjprp\" (UID: \"cd1d7d7d-297e-4d67-94c2-746c0295105f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zjprp" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.734660 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6490b89d-117d-4d8c-b625-b02d0404c882-encryption-config\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.734677 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrdnl\" (UniqueName: \"kubernetes.io/projected/6490b89d-117d-4d8c-b625-b02d0404c882-kube-api-access-rrdnl\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.734705 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6490b89d-117d-4d8c-b625-b02d0404c882-audit-dir\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.734724 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: 
\"kubernetes.io/configmap/6490b89d-117d-4d8c-b625-b02d0404c882-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.734750 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/cd1d7d7d-297e-4d67-94c2-746c0295105f-metrics-tls\") pod \"ingress-operator-5b745b69d9-zjprp\" (UID: \"cd1d7d7d-297e-4d67-94c2-746c0295105f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zjprp" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.734771 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6490b89d-117d-4d8c-b625-b02d0404c882-serving-cert\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.734789 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cd1d7d7d-297e-4d67-94c2-746c0295105f-bound-sa-token\") pod \"ingress-operator-5b745b69d9-zjprp\" (UID: \"cd1d7d7d-297e-4d67-94c2-746c0295105f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zjprp" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.734820 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xg99z\" (UniqueName: \"kubernetes.io/projected/0e28f47c-623d-4724-bcec-80d38d902eba-kube-api-access-xg99z\") pod \"dns-operator-744455d44c-fqng2\" (UID: \"0e28f47c-623d-4724-bcec-80d38d902eba\") " pod="openshift-dns-operator/dns-operator-744455d44c-fqng2" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.734843 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cd1d7d7d-297e-4d67-94c2-746c0295105f-trusted-ca\") pod \"ingress-operator-5b745b69d9-zjprp\" (UID: \"cd1d7d7d-297e-4d67-94c2-746c0295105f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zjprp" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.734881 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6490b89d-117d-4d8c-b625-b02d0404c882-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.735053 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536815-fc4ph"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.735386 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6490b89d-117d-4d8c-b625-b02d0404c882-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.735728 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/6490b89d-117d-4d8c-b625-b02d0404c882-audit-dir\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.735818 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6490b89d-117d-4d8c-b625-b02d0404c882-audit-policies\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.736227 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6490b89d-117d-4d8c-b625-b02d0404c882-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.737607 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-jxspb"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.738678 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jxspb" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.739289 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mw4mn"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.739891 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536815-fc4ph" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.740347 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h46g5" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.742071 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6490b89d-117d-4d8c-b625-b02d0404c882-encryption-config\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.742522 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6490b89d-117d-4d8c-b625-b02d0404c882-serving-cert\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.744716 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6490b89d-117d-4d8c-b625-b02d0404c882-etcd-client\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.754320 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-mw4mn" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.755578 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-hfzxd"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.758288 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.758666 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-hfzxd" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.758854 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-hb87p"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.763214 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k4pxk"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.763653 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k4pxk" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.768141 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-j5z2q"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.769103 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-j5z2q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.770707 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-p4n6h"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.771285 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-p4n6h" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.772530 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-vnzzq"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.773140 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vnzzq" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.773567 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.774486 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-sk82j"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.775114 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sk82j" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.776175 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn7fm"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.776636 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn7fm" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.776928 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rjqrq"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.777272 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rjqrq" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.778870 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-297f4"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.779617 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-297f4" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.779814 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-fqng2"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.780772 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zjr9n"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.784042 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5svll"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.791102 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-8fhbk"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.793179 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.794225 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-zjprp"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.795971 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-5f8sr"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.799687 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536826-mxq7k"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.800947 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-2c2zk"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.803109 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-jxspb"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.805555 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-7gs6x"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.805586 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6r57j"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.807484 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k4pxk"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.808179 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-authentication/oauth-openshift-558db77b4-dvwqp"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.809112 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bfl2l"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.809986 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vrmbr"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.811012 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-txmh7"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.812005 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536815-fc4ph"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.812987 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-blvwj"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.813832 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.813960 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-p4n6h"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.814982 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h46g5"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.815968 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rjqrq"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.816914 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-sblbf"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.818114 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-r87pw"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.818947 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-vnzzq"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.819944 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mw4mn"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.820916 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-297f4"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.822345 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-sk82j"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.823271 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-j5z2q"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.823881 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-4848q"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.824712 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-4848q" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.824898 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-qs2km"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.825924 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-qs2km" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.826381 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-4848q"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.828786 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-qs2km"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.830009 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn7fm"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.831032 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-vf5cg"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.831642 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-vf5cg" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.832594 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-vf5cg"] Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.833596 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.835461 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0e28f47c-623d-4724-bcec-80d38d902eba-metrics-tls\") pod \"dns-operator-744455d44c-fqng2\" (UID: \"0e28f47c-623d-4724-bcec-80d38d902eba\") " pod="openshift-dns-operator/dns-operator-744455d44c-fqng2" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.835498 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n6lpg\" (UniqueName: \"kubernetes.io/projected/cd1d7d7d-297e-4d67-94c2-746c0295105f-kube-api-access-n6lpg\") pod \"ingress-operator-5b745b69d9-zjprp\" (UID: \"cd1d7d7d-297e-4d67-94c2-746c0295105f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zjprp" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.835573 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/cd1d7d7d-297e-4d67-94c2-746c0295105f-metrics-tls\") pod \"ingress-operator-5b745b69d9-zjprp\" (UID: \"cd1d7d7d-297e-4d67-94c2-746c0295105f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zjprp" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.835597 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cd1d7d7d-297e-4d67-94c2-746c0295105f-bound-sa-token\") pod \"ingress-operator-5b745b69d9-zjprp\" (UID: \"cd1d7d7d-297e-4d67-94c2-746c0295105f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zjprp" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.835630 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xg99z\" (UniqueName: \"kubernetes.io/projected/0e28f47c-623d-4724-bcec-80d38d902eba-kube-api-access-xg99z\") pod 
\"dns-operator-744455d44c-fqng2\" (UID: \"0e28f47c-623d-4724-bcec-80d38d902eba\") " pod="openshift-dns-operator/dns-operator-744455d44c-fqng2" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.835651 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cd1d7d7d-297e-4d67-94c2-746c0295105f-trusted-ca\") pod \"ingress-operator-5b745b69d9-zjprp\" (UID: \"cd1d7d7d-297e-4d67-94c2-746c0295105f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zjprp" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.837818 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cd1d7d7d-297e-4d67-94c2-746c0295105f-trusted-ca\") pod \"ingress-operator-5b745b69d9-zjprp\" (UID: \"cd1d7d7d-297e-4d67-94c2-746c0295105f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zjprp" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.838379 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0e28f47c-623d-4724-bcec-80d38d902eba-metrics-tls\") pod \"dns-operator-744455d44c-fqng2\" (UID: \"0e28f47c-623d-4724-bcec-80d38d902eba\") " pod="openshift-dns-operator/dns-operator-744455d44c-fqng2" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.839025 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/cd1d7d7d-297e-4d67-94c2-746c0295105f-metrics-tls\") pod \"ingress-operator-5b745b69d9-zjprp\" (UID: \"cd1d7d7d-297e-4d67-94c2-746c0295105f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zjprp" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.859076 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.873748 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.893149 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.914197 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.933716 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.953903 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.973624 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Feb 27 16:27:31 crc kubenswrapper[4751]: I0227 16:27:31.994158 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.013702 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.034187 4751 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-config-operator"/"kube-root-ca.crt" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.054284 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.094203 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.113448 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.134481 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.154649 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.189570 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.194577 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.215759 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.253569 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.274028 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.294156 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.313987 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.333364 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.354636 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.374804 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.394933 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.413778 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.434253 4751 reflector.go:368] Caches populated 
for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.453471 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.474011 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.493158 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.514536 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.534553 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.553685 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.574652 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.594342 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.614367 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.633684 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.655244 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.673751 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.694084 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.714079 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.734144 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.752459 4751 request.go:700] Waited for 1.012254592s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-service-ca-operator/secrets?fieldSelector=metadata.name%3Dservice-ca-operator-dockercfg-rg9jl&limit=500&resourceVersion=0 Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.754732 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Feb 27 
16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.775582 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.813736 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.819832 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrdnl\" (UniqueName: \"kubernetes.io/projected/6490b89d-117d-4d8c-b625-b02d0404c882-kube-api-access-rrdnl\") pod \"apiserver-7bbb656c7d-c458q\" (UID: \"6490b89d-117d-4d8c-b625-b02d0404c882\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.834786 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.854429 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.883785 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.893845 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.914246 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.934145 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.954854 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.974463 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Feb 27 16:27:32 crc kubenswrapper[4751]: I0227 16:27:32.993992 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.014315 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.034615 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.053385 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.075055 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.088269 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.094929 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.114518 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.134022 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.154504 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.174326 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.194916 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.213775 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.235243 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.253920 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.275439 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.294977 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.317292 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.335118 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.338728 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q"] Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.353600 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.373783 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.394742 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.414041 4751 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.434968 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.454554 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.475013 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.494998 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.514504 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.533870 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.554204 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.574923 4751 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.580154 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" event={"ID":"6490b89d-117d-4d8c-b625-b02d0404c882","Type":"ContainerStarted","Data":"d0a789d5463712846ad1149423291742516fef6ab2640a7c5fbed7f2c52e42f1"} Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.595101 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.614034 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.634392 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.654193 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.693318 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cd1d7d7d-297e-4d67-94c2-746c0295105f-bound-sa-token\") pod \"ingress-operator-5b745b69d9-zjprp\" (UID: \"cd1d7d7d-297e-4d67-94c2-746c0295105f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zjprp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.720275 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n6lpg\" (UniqueName: \"kubernetes.io/projected/cd1d7d7d-297e-4d67-94c2-746c0295105f-kube-api-access-n6lpg\") pod \"ingress-operator-5b745b69d9-zjprp\" (UID: \"cd1d7d7d-297e-4d67-94c2-746c0295105f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zjprp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.729303 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-xg99z\" (UniqueName: \"kubernetes.io/projected/0e28f47c-623d-4724-bcec-80d38d902eba-kube-api-access-xg99z\") pod \"dns-operator-744455d44c-fqng2\" (UID: \"0e28f47c-623d-4724-bcec-80d38d902eba\") " pod="openshift-dns-operator/dns-operator-744455d44c-fqng2" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.753076 4751 request.go:700] Waited for 1.516234313s due to client-side throttling, not priority and fairness, request: PATCH:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/cluster-image-registry-operator-dc59b4c8b-5svll/status Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.756939 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c6775c9-9034-4be4-8002-201a25a35eab-serving-cert\") pod \"route-controller-manager-6576b87f9c-rczbk\" (UID: \"7c6775c9-9034-4be4-8002-201a25a35eab\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.756992 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8de82997-0daf-469d-ba5e-23fcaaa04614-serving-cert\") pod \"controller-manager-879f6c89f-htn5q\" (UID: \"8de82997-0daf-469d-ba5e-23fcaaa04614\") " pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.757019 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-7hctb\" (UID: \"ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7hctb" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.757062 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-serving-cert\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.757083 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.757104 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e88532-9a76-44df-b084-bfd9c96457c7-config\") pod \"console-operator-58897d9998-7gs6x\" (UID: \"e7e88532-9a76-44df-b084-bfd9c96457c7\") " pod="openshift-console-operator/console-operator-58897d9998-7gs6x" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.757120 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-dvwqp\" 
(UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.757150 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b967e25-f884-4c9b-8307-cd4b669bbf76-config\") pod \"machine-approver-56656f9798-wwdf6\" (UID: \"4b967e25-f884-4c9b-8307-cd4b669bbf76\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wwdf6" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.757222 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.757473 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppbrc\" (UniqueName: \"kubernetes.io/projected/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-kube-api-access-ppbrc\") pod \"console-f9d7485db-hb87p\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.757587 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f44df15c-1cd2-44eb-a4ac-efca1d06d3b3-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-5svll\" (UID: \"f44df15c-1cd2-44eb-a4ac-efca1d06d3b3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5svll" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.757620 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/714d38ad-6a1e-4d9d-98ce-22a8582a6429-etcd-ca\") pod \"etcd-operator-b45778765-8fhbk\" (UID: \"714d38ad-6a1e-4d9d-98ce-22a8582a6429\") " pod="openshift-etcd-operator/etcd-operator-b45778765-8fhbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.757639 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxwqn\" (UniqueName: \"kubernetes.io/projected/fbe81302-7306-4254-b54b-a91b118197a3-kube-api-access-pxwqn\") pod \"openshift-controller-manager-operator-756b6f6bc6-kjwh6\" (UID: \"fbe81302-7306-4254-b54b-a91b118197a3\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjwh6" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.757851 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.757903 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-image-import-ca\") pod 
\"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.757924 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/51bdc38f-9b69-437f-9a17-edb963fb01ab-default-certificate\") pod \"router-default-5444994796-hsqjr\" (UID: \"51bdc38f-9b69-437f-9a17-edb963fb01ab\") " pod="openshift-ingress/router-default-5444994796-hsqjr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.757942 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/714d38ad-6a1e-4d9d-98ce-22a8582a6429-config\") pod \"etcd-operator-b45778765-8fhbk\" (UID: \"714d38ad-6a1e-4d9d-98ce-22a8582a6429\") " pod="openshift-etcd-operator/etcd-operator-b45778765-8fhbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.757960 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8de82997-0daf-469d-ba5e-23fcaaa04614-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-htn5q\" (UID: \"8de82997-0daf-469d-ba5e-23fcaaa04614\") " pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758011 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-grvjx\" (UniqueName: \"kubernetes.io/projected/8de82997-0daf-469d-ba5e-23fcaaa04614-kube-api-access-grvjx\") pod \"controller-manager-879f6c89f-htn5q\" (UID: \"8de82997-0daf-469d-ba5e-23fcaaa04614\") " pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758058 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/714d38ad-6a1e-4d9d-98ce-22a8582a6429-etcd-service-ca\") pod \"etcd-operator-b45778765-8fhbk\" (UID: \"714d38ad-6a1e-4d9d-98ce-22a8582a6429\") " pod="openshift-etcd-operator/etcd-operator-b45778765-8fhbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758086 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/f44df15c-1cd2-44eb-a4ac-efca1d06d3b3-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-5svll\" (UID: \"f44df15c-1cd2-44eb-a4ac-efca1d06d3b3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5svll" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758116 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fbe81302-7306-4254-b54b-a91b118197a3-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-kjwh6\" (UID: \"fbe81302-7306-4254-b54b-a91b118197a3\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjwh6" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758215 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: 
\"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758246 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758287 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-audit-dir\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758313 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7l6z4\" (UniqueName: \"kubernetes.io/projected/ee5f53d5-2c38-465d-ad33-d5c0c5eb3923-kube-api-access-7l6z4\") pod \"downloads-7954f5f757-5f8sr\" (UID: \"ee5f53d5-2c38-465d-ad33-d5c0c5eb3923\") " pod="openshift-console/downloads-7954f5f757-5f8sr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758571 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxfpx\" (UniqueName: \"kubernetes.io/projected/7c6775c9-9034-4be4-8002-201a25a35eab-kube-api-access-bxfpx\") pod \"route-controller-manager-6576b87f9c-rczbk\" (UID: \"7c6775c9-9034-4be4-8002-201a25a35eab\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758606 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4b967e25-f884-4c9b-8307-cd4b669bbf76-auth-proxy-config\") pod \"machine-approver-56656f9798-wwdf6\" (UID: \"4b967e25-f884-4c9b-8307-cd4b669bbf76\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wwdf6" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758631 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ngdrh\" (UniqueName: \"kubernetes.io/projected/4b967e25-f884-4c9b-8307-cd4b669bbf76-kube-api-access-ngdrh\") pod \"machine-approver-56656f9798-wwdf6\" (UID: \"4b967e25-f884-4c9b-8307-cd4b669bbf76\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wwdf6" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758649 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f44df15c-1cd2-44eb-a4ac-efca1d06d3b3-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-5svll\" (UID: \"f44df15c-1cd2-44eb-a4ac-efca1d06d3b3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5svll" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758670 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-oauth-serving-cert\") pod \"console-f9d7485db-hb87p\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758692 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fbe81302-7306-4254-b54b-a91b118197a3-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-kjwh6\" (UID: \"fbe81302-7306-4254-b54b-a91b118197a3\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjwh6" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758723 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drxnk\" (UniqueName: \"kubernetes.io/projected/e7e88532-9a76-44df-b084-bfd9c96457c7-kube-api-access-drxnk\") pod \"console-operator-58897d9998-7gs6x\" (UID: \"e7e88532-9a76-44df-b084-bfd9c96457c7\") " pod="openshift-console-operator/console-operator-58897d9998-7gs6x" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758762 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3ffa275a-62dc-46f6-ae70-34b5758d918e-ca-trust-extracted\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758781 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/14cd3902-6c39-409b-88f9-ddb6a23bc450-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-sblbf\" (UID: \"14cd3902-6c39-409b-88f9-ddb6a23bc450\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-sblbf" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758803 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758825 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ef47b0ab-908a-4d99-9517-32a5984070fb-serving-cert\") pod \"openshift-config-operator-7777fb866f-2c2zk\" (UID: \"ef47b0ab-908a-4d99-9517-32a5984070fb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2c2zk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758850 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-audit-policies\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758870 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-audit-dir\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758890 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-audit\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758910 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwmgj\" (UniqueName: \"kubernetes.io/projected/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-kube-api-access-hwmgj\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758928 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f3483f04-316b-403b-9117-b744e8bc5c3f-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-gjlwd\" (UID: \"f3483f04-316b-403b-9117-b744e8bc5c3f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gjlwd" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758947 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/51bdc38f-9b69-437f-9a17-edb963fb01ab-service-ca-bundle\") pod \"router-default-5444994796-hsqjr\" (UID: \"51bdc38f-9b69-437f-9a17-edb963fb01ab\") " pod="openshift-ingress/router-default-5444994796-hsqjr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758965 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.758981 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8de82997-0daf-469d-ba5e-23fcaaa04614-client-ca\") pod \"controller-manager-879f6c89f-htn5q\" (UID: \"8de82997-0daf-469d-ba5e-23fcaaa04614\") " pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759002 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3ffa275a-62dc-46f6-ae70-34b5758d918e-registry-certificates\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759020 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ml4k4\" (UniqueName: 
\"kubernetes.io/projected/f3483f04-316b-403b-9117-b744e8bc5c3f-kube-api-access-ml4k4\") pod \"cluster-samples-operator-665b6dd947-gjlwd\" (UID: \"f3483f04-316b-403b-9117-b744e8bc5c3f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gjlwd" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759038 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e7e88532-9a76-44df-b084-bfd9c96457c7-trusted-ca\") pod \"console-operator-58897d9998-7gs6x\" (UID: \"e7e88532-9a76-44df-b084-bfd9c96457c7\") " pod="openshift-console-operator/console-operator-58897d9998-7gs6x" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759123 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8de82997-0daf-469d-ba5e-23fcaaa04614-config\") pod \"controller-manager-879f6c89f-htn5q\" (UID: \"8de82997-0daf-469d-ba5e-23fcaaa04614\") " pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759226 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/51bdc38f-9b69-437f-9a17-edb963fb01ab-stats-auth\") pod \"router-default-5444994796-hsqjr\" (UID: \"51bdc38f-9b69-437f-9a17-edb963fb01ab\") " pod="openshift-ingress/router-default-5444994796-hsqjr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759256 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/714d38ad-6a1e-4d9d-98ce-22a8582a6429-serving-cert\") pod \"etcd-operator-b45778765-8fhbk\" (UID: \"714d38ad-6a1e-4d9d-98ce-22a8582a6429\") " pod="openshift-etcd-operator/etcd-operator-b45778765-8fhbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759276 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78xgx\" (UniqueName: \"kubernetes.io/projected/714d38ad-6a1e-4d9d-98ce-22a8582a6429-kube-api-access-78xgx\") pod \"etcd-operator-b45778765-8fhbk\" (UID: \"714d38ad-6a1e-4d9d-98ce-22a8582a6429\") " pod="openshift-etcd-operator/etcd-operator-b45778765-8fhbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759293 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3-images\") pod \"machine-api-operator-5694c8668f-7hctb\" (UID: \"ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7hctb" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759311 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9592t\" (UniqueName: \"kubernetes.io/projected/ef47b0ab-908a-4d99-9517-32a5984070fb-kube-api-access-9592t\") pod \"openshift-config-operator-7777fb866f-2c2zk\" (UID: \"ef47b0ab-908a-4d99-9517-32a5984070fb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2c2zk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759342 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3ffa275a-62dc-46f6-ae70-34b5758d918e-trusted-ca\") pod 
\"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759365 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cvth\" (UniqueName: \"kubernetes.io/projected/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-kube-api-access-9cvth\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759412 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3ffa275a-62dc-46f6-ae70-34b5758d918e-registry-tls\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759436 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3ffa275a-62dc-46f6-ae70-34b5758d918e-bound-sa-token\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759457 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e88532-9a76-44df-b084-bfd9c96457c7-serving-cert\") pod \"console-operator-58897d9998-7gs6x\" (UID: \"e7e88532-9a76-44df-b084-bfd9c96457c7\") " pod="openshift-console-operator/console-operator-58897d9998-7gs6x" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759479 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-node-pullsecrets\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759499 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-config\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759516 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-etcd-serving-ca\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759536 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759554 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wblv\" (UniqueName: \"kubernetes.io/projected/ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3-kube-api-access-4wblv\") pod \"machine-api-operator-5694c8668f-7hctb\" (UID: \"ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7hctb" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759571 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/51bdc38f-9b69-437f-9a17-edb963fb01ab-metrics-certs\") pod \"router-default-5444994796-hsqjr\" (UID: \"51bdc38f-9b69-437f-9a17-edb963fb01ab\") " pod="openshift-ingress/router-default-5444994796-hsqjr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759588 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3-config\") pod \"machine-api-operator-5694c8668f-7hctb\" (UID: \"ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7hctb" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759604 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-trusted-ca-bundle\") pod \"console-f9d7485db-hb87p\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759624 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-encryption-config\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759640 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/4b967e25-f884-4c9b-8307-cd4b669bbf76-machine-approver-tls\") pod \"machine-approver-56656f9798-wwdf6\" (UID: \"4b967e25-f884-4c9b-8307-cd4b669bbf76\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wwdf6" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759668 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-console-config\") pod \"console-f9d7485db-hb87p\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759707 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 
16:27:33.759736 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3ffa275a-62dc-46f6-ae70-34b5758d918e-installation-pull-secrets\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759759 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14cd3902-6c39-409b-88f9-ddb6a23bc450-config\") pod \"kube-apiserver-operator-766d6c64bb-sblbf\" (UID: \"14cd3902-6c39-409b-88f9-ddb6a23bc450\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-sblbf" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759782 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/714d38ad-6a1e-4d9d-98ce-22a8582a6429-etcd-client\") pod \"etcd-operator-b45778765-8fhbk\" (UID: \"714d38ad-6a1e-4d9d-98ce-22a8582a6429\") " pod="openshift-etcd-operator/etcd-operator-b45778765-8fhbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759805 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/ef47b0ab-908a-4d99-9517-32a5984070fb-available-featuregates\") pod \"openshift-config-operator-7777fb866f-2c2zk\" (UID: \"ef47b0ab-908a-4d99-9517-32a5984070fb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2c2zk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759940 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c6775c9-9034-4be4-8002-201a25a35eab-config\") pod \"route-controller-manager-6576b87f9c-rczbk\" (UID: \"7c6775c9-9034-4be4-8002-201a25a35eab\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.759974 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/14cd3902-6c39-409b-88f9-ddb6a23bc450-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-sblbf\" (UID: \"14cd3902-6c39-409b-88f9-ddb6a23bc450\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-sblbf" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.760007 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sdp4q\" (UniqueName: \"kubernetes.io/projected/f44df15c-1cd2-44eb-a4ac-efca1d06d3b3-kube-api-access-sdp4q\") pod \"cluster-image-registry-operator-dc59b4c8b-5svll\" (UID: \"f44df15c-1cd2-44eb-a4ac-efca1d06d3b3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5svll" Feb 27 16:27:33 crc kubenswrapper[4751]: E0227 16:27:33.760028 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:34.26001386 +0000 UTC m=+216.407028427 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.760052 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-console-serving-cert\") pod \"console-f9d7485db-hb87p\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.760314 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-service-ca\") pod \"console-f9d7485db-hb87p\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.760345 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4svm\" (UniqueName: \"kubernetes.io/projected/3ffa275a-62dc-46f6-ae70-34b5758d918e-kube-api-access-v4svm\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.760367 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-trusted-ca-bundle\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.760386 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xc9cw\" (UniqueName: \"kubernetes.io/projected/51bdc38f-9b69-437f-9a17-edb963fb01ab-kube-api-access-xc9cw\") pod \"router-default-5444994796-hsqjr\" (UID: \"51bdc38f-9b69-437f-9a17-edb963fb01ab\") " pod="openshift-ingress/router-default-5444994796-hsqjr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.760425 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.760451 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-console-oauth-config\") pod \"console-f9d7485db-hb87p\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.760474 4751 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-etcd-client\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.760516 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7c6775c9-9034-4be4-8002-201a25a35eab-client-ca\") pod \"route-controller-manager-6576b87f9c-rczbk\" (UID: \"7c6775c9-9034-4be4-8002-201a25a35eab\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.760624 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.861489 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:33 crc kubenswrapper[4751]: E0227 16:27:33.861664 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:34.361629854 +0000 UTC m=+216.508644341 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.862042 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-grvjx\" (UniqueName: \"kubernetes.io/projected/8de82997-0daf-469d-ba5e-23fcaaa04614-kube-api-access-grvjx\") pod \"controller-manager-879f6c89f-htn5q\" (UID: \"8de82997-0daf-469d-ba5e-23fcaaa04614\") " pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.862143 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fbe81302-7306-4254-b54b-a91b118197a3-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-kjwh6\" (UID: \"fbe81302-7306-4254-b54b-a91b118197a3\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjwh6" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.862248 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7l6z4\" (UniqueName: \"kubernetes.io/projected/ee5f53d5-2c38-465d-ad33-d5c0c5eb3923-kube-api-access-7l6z4\") pod \"downloads-7954f5f757-5f8sr\" (UID: \"ee5f53d5-2c38-465d-ad33-d5c0c5eb3923\") " pod="openshift-console/downloads-7954f5f757-5f8sr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.862387 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89hmp\" (UniqueName: \"kubernetes.io/projected/e03572cc-cf26-4ed2-bb88-ae6d3150b904-kube-api-access-89hmp\") pod \"multus-admission-controller-857f4d67dd-r87pw\" (UID: \"e03572cc-cf26-4ed2-bb88-ae6d3150b904\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-r87pw" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.862495 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/14cd3902-6c39-409b-88f9-ddb6a23bc450-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-sblbf\" (UID: \"14cd3902-6c39-409b-88f9-ddb6a23bc450\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-sblbf" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.862598 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ef47b0ab-908a-4d99-9517-32a5984070fb-serving-cert\") pod \"openshift-config-operator-7777fb866f-2c2zk\" (UID: \"ef47b0ab-908a-4d99-9517-32a5984070fb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2c2zk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.862676 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/03b7032d-fb47-4f5c-95b8-f69f58b65db7-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-6r57j\" (UID: \"03b7032d-fb47-4f5c-95b8-f69f58b65db7\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6r57j" 
Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.862753 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-audit-dir\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.862867 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdrx8\" (UniqueName: \"kubernetes.io/projected/daf0456f-25d3-4dee-8eac-49f0056aa251-kube-api-access-wdrx8\") pod \"kube-storage-version-migrator-operator-b67b599dd-p4n6h\" (UID: \"daf0456f-25d3-4dee-8eac-49f0056aa251\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-p4n6h" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.862974 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwmgj\" (UniqueName: \"kubernetes.io/projected/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-kube-api-access-hwmgj\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.863072 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f3483f04-316b-403b-9117-b744e8bc5c3f-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-gjlwd\" (UID: \"f3483f04-316b-403b-9117-b744e8bc5c3f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gjlwd" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.863177 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a74a9206-b910-48c2-8448-29baf6140688-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-txmh7\" (UID: \"a74a9206-b910-48c2-8448-29baf6140688\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-txmh7" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.862798 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-audit-dir\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.863284 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/578d81ae-01aa-4cc7-bdee-de283490661d-srv-cert\") pod \"olm-operator-6b444d44fb-k4pxk\" (UID: \"578d81ae-01aa-4cc7-bdee-de283490661d\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k4pxk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.863379 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/50c130e9-87b3-43aa-a620-8faefd1add54-proxy-tls\") pod \"machine-config-controller-84d6567774-sk82j\" (UID: \"50c130e9-87b3-43aa-a620-8faefd1add54\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sk82j" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 
16:27:33.863436 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ml4k4\" (UniqueName: \"kubernetes.io/projected/f3483f04-316b-403b-9117-b744e8bc5c3f-kube-api-access-ml4k4\") pod \"cluster-samples-operator-665b6dd947-gjlwd\" (UID: \"f3483f04-316b-403b-9117-b744e8bc5c3f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gjlwd" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.863456 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7bgh\" (UniqueName: \"kubernetes.io/projected/11a526fe-64f1-4da8-a0e8-ed276ec069fb-kube-api-access-m7bgh\") pod \"auto-csr-approver-29536826-mxq7k\" (UID: \"11a526fe-64f1-4da8-a0e8-ed276ec069fb\") " pod="openshift-infra/auto-csr-approver-29536826-mxq7k" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.863473 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ff68n\" (UniqueName: \"kubernetes.io/projected/92ccd256-d5a2-4857-8acb-f11bc462ac9c-kube-api-access-ff68n\") pod \"catalog-operator-68c6474976-bfl2l\" (UID: \"92ccd256-d5a2-4857-8acb-f11bc462ac9c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bfl2l" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.863495 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78xgx\" (UniqueName: \"kubernetes.io/projected/714d38ad-6a1e-4d9d-98ce-22a8582a6429-kube-api-access-78xgx\") pod \"etcd-operator-b45778765-8fhbk\" (UID: \"714d38ad-6a1e-4d9d-98ce-22a8582a6429\") " pod="openshift-etcd-operator/etcd-operator-b45778765-8fhbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.863514 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3-images\") pod \"machine-api-operator-5694c8668f-7hctb\" (UID: \"ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7hctb" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.863530 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebd19e25-96f6-454f-9472-d504d15a4821-serving-cert\") pod \"service-ca-operator-777779d784-jxspb\" (UID: \"ebd19e25-96f6-454f-9472-d504d15a4821\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jxspb" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.863550 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-629gj\" (UniqueName: \"kubernetes.io/projected/526f8eee-e82b-4a1b-93ff-46a732856a7c-kube-api-access-629gj\") pod \"ingress-canary-4848q\" (UID: \"526f8eee-e82b-4a1b-93ff-46a732856a7c\") " pod="openshift-ingress-canary/ingress-canary-4848q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.863570 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xf5zk\" (UniqueName: \"kubernetes.io/projected/59eee97a-55e1-4400-8f57-ab0781947114-kube-api-access-xf5zk\") pod \"openshift-apiserver-operator-796bbdcf4f-blvwj\" (UID: \"59eee97a-55e1-4400-8f57-ab0781947114\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-blvwj" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.863594 4751 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-9cvth\" (UniqueName: \"kubernetes.io/projected/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-kube-api-access-9cvth\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.863630 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3ffa275a-62dc-46f6-ae70-34b5758d918e-bound-sa-token\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.863668 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59eee97a-55e1-4400-8f57-ab0781947114-config\") pod \"openshift-apiserver-operator-796bbdcf4f-blvwj\" (UID: \"59eee97a-55e1-4400-8f57-ab0781947114\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-blvwj" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.863690 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xjpr\" (UniqueName: \"kubernetes.io/projected/a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95-kube-api-access-2xjpr\") pod \"machine-config-operator-74547568cd-297f4\" (UID: \"a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-297f4" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.863717 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-node-pullsecrets\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.863740 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.863769 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.863791 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14cd3902-6c39-409b-88f9-ddb6a23bc450-config\") pod \"kube-apiserver-operator-766d6c64bb-sblbf\" (UID: \"14cd3902-6c39-409b-88f9-ddb6a23bc450\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-sblbf" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.863814 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/ef47b0ab-908a-4d99-9517-32a5984070fb-available-featuregates\") pod \"openshift-config-operator-7777fb866f-2c2zk\" (UID: \"ef47b0ab-908a-4d99-9517-32a5984070fb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2c2zk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.863841 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c6775c9-9034-4be4-8002-201a25a35eab-config\") pod \"route-controller-manager-6576b87f9c-rczbk\" (UID: \"7c6775c9-9034-4be4-8002-201a25a35eab\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.863842 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-node-pullsecrets\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.863863 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sdp4q\" (UniqueName: \"kubernetes.io/projected/f44df15c-1cd2-44eb-a4ac-efca1d06d3b3-kube-api-access-sdp4q\") pod \"cluster-image-registry-operator-dc59b4c8b-5svll\" (UID: \"f44df15c-1cd2-44eb-a4ac-efca1d06d3b3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5svll" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.864033 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-service-ca\") pod \"console-f9d7485db-hb87p\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.864072 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/ea0878fe-ac62-445d-87c1-248128612682-signing-key\") pod \"service-ca-9c57cc56f-j5z2q\" (UID: \"ea0878fe-ac62-445d-87c1-248128612682\") " pod="openshift-service-ca/service-ca-9c57cc56f-j5z2q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.864108 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7c1d7708-439d-4d51-ab68-0a2058480646-metrics-tls\") pod \"dns-default-vf5cg\" (UID: \"7c1d7708-439d-4d51-ab68-0a2058480646\") " pod="openshift-dns/dns-default-vf5cg" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.864143 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7c6775c9-9034-4be4-8002-201a25a35eab-client-ca\") pod \"route-controller-manager-6576b87f9c-rczbk\" (UID: \"7c6775c9-9034-4be4-8002-201a25a35eab\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.864174 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.864212 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ddcb9142-ff54-44b3-bb37-0e4b103c407e-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vrmbr\" (UID: \"ddcb9142-ff54-44b3-bb37-0e4b103c407e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vrmbr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.864220 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/ef47b0ab-908a-4d99-9517-32a5984070fb-available-featuregates\") pod \"openshift-config-operator-7777fb866f-2c2zk\" (UID: \"ef47b0ab-908a-4d99-9517-32a5984070fb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2c2zk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.864264 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/4155e5dc-eb83-4d58-bb2b-554fcbda2e8c-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-rjqrq\" (UID: \"4155e5dc-eb83-4d58-bb2b-554fcbda2e8c\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rjqrq" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.864302 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.864335 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/10b563d1-9d53-4755-9b40-4f907b6ea224-tmpfs\") pod \"packageserver-d55dfcdfc-hn7fm\" (UID: \"10b563d1-9d53-4755-9b40-4f907b6ea224\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn7fm" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.864366 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/10b563d1-9d53-4755-9b40-4f907b6ea224-apiservice-cert\") pod \"packageserver-d55dfcdfc-hn7fm\" (UID: \"10b563d1-9d53-4755-9b40-4f907b6ea224\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn7fm" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.864395 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/578d81ae-01aa-4cc7-bdee-de283490661d-profile-collector-cert\") pod \"olm-operator-6b444d44fb-k4pxk\" (UID: \"578d81ae-01aa-4cc7-bdee-de283490661d\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k4pxk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.864461 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmnjn\" (UniqueName: \"kubernetes.io/projected/d474cfdd-550b-471d-aea1-6bc3f8532fa5-kube-api-access-mmnjn\") pod \"csi-hostpathplugin-qs2km\" 
(UID: \"d474cfdd-550b-471d-aea1-6bc3f8532fa5\") " pod="hostpath-provisioner/csi-hostpathplugin-qs2km" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.864472 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3-images\") pod \"machine-api-operator-5694c8668f-7hctb\" (UID: \"ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7hctb" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.864501 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e88532-9a76-44df-b084-bfd9c96457c7-config\") pod \"console-operator-58897d9998-7gs6x\" (UID: \"e7e88532-9a76-44df-b084-bfd9c96457c7\") " pod="openshift-console-operator/console-operator-58897d9998-7gs6x" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.864533 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.864568 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69xtc\" (UniqueName: \"kubernetes.io/projected/6efd7ce0-9b49-4c51-accf-3efcfc1188e6-kube-api-access-69xtc\") pod \"marketplace-operator-79b997595-mw4mn\" (UID: \"6efd7ce0-9b49-4c51-accf-3efcfc1188e6\") " pod="openshift-marketplace/marketplace-operator-79b997595-mw4mn" Feb 27 16:27:33 crc kubenswrapper[4751]: E0227 16:27:33.864605 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:34.364588041 +0000 UTC m=+216.511602528 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.864727 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14cd3902-6c39-409b-88f9-ddb6a23bc450-config\") pod \"kube-apiserver-operator-766d6c64bb-sblbf\" (UID: \"14cd3902-6c39-409b-88f9-ddb6a23bc450\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-sblbf" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.864854 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f44df15c-1cd2-44eb-a4ac-efca1d06d3b3-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-5svll\" (UID: \"f44df15c-1cd2-44eb-a4ac-efca1d06d3b3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5svll" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.864888 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ddcb9142-ff54-44b3-bb37-0e4b103c407e-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vrmbr\" (UID: \"ddcb9142-ff54-44b3-bb37-0e4b103c407e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vrmbr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.865013 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8de82997-0daf-469d-ba5e-23fcaaa04614-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-htn5q\" (UID: \"8de82997-0daf-469d-ba5e-23fcaaa04614\") " pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.865098 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-image-import-ca\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.865137 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/51bdc38f-9b69-437f-9a17-edb963fb01ab-default-certificate\") pod \"router-default-5444994796-hsqjr\" (UID: \"51bdc38f-9b69-437f-9a17-edb963fb01ab\") " pod="openshift-ingress/router-default-5444994796-hsqjr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.865158 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-service-ca\") pod \"console-f9d7485db-hb87p\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.865201 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/714d38ad-6a1e-4d9d-98ce-22a8582a6429-config\") pod \"etcd-operator-b45778765-8fhbk\" (UID: \"714d38ad-6a1e-4d9d-98ce-22a8582a6429\") " pod="openshift-etcd-operator/etcd-operator-b45778765-8fhbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.865267 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/714d38ad-6a1e-4d9d-98ce-22a8582a6429-etcd-service-ca\") pod \"etcd-operator-b45778765-8fhbk\" (UID: \"714d38ad-6a1e-4d9d-98ce-22a8582a6429\") " pod="openshift-etcd-operator/etcd-operator-b45778765-8fhbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.865304 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mc52\" (UniqueName: \"kubernetes.io/projected/4155e5dc-eb83-4d58-bb2b-554fcbda2e8c-kube-api-access-7mc52\") pod \"control-plane-machine-set-operator-78cbb6b69f-rjqrq\" (UID: \"4155e5dc-eb83-4d58-bb2b-554fcbda2e8c\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rjqrq" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.865566 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/f44df15c-1cd2-44eb-a4ac-efca1d06d3b3-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-5svll\" (UID: \"f44df15c-1cd2-44eb-a4ac-efca1d06d3b3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5svll" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.865606 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/daf0456f-25d3-4dee-8eac-49f0056aa251-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-p4n6h\" (UID: \"daf0456f-25d3-4dee-8eac-49f0056aa251\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-p4n6h" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.865642 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.865674 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.865754 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c6775c9-9034-4be4-8002-201a25a35eab-config\") pod \"route-controller-manager-6576b87f9c-rczbk\" (UID: \"7c6775c9-9034-4be4-8002-201a25a35eab\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.865777 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/ea0878fe-ac62-445d-87c1-248128612682-signing-cabundle\") pod \"service-ca-9c57cc56f-j5z2q\" (UID: \"ea0878fe-ac62-445d-87c1-248128612682\") " pod="openshift-service-ca/service-ca-9c57cc56f-j5z2q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.865812 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rllc\" (UniqueName: \"kubernetes.io/projected/7c1d7708-439d-4d51-ab68-0a2058480646-kube-api-access-2rllc\") pod \"dns-default-vf5cg\" (UID: \"7c1d7708-439d-4d51-ab68-0a2058480646\") " pod="openshift-dns/dns-default-vf5cg" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.865843 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/7b0a19d1-3c2c-444a-8b89-b417e7324b78-node-bootstrap-token\") pod \"machine-config-server-hfzxd\" (UID: \"7b0a19d1-3c2c-444a-8b89-b417e7324b78\") " pod="openshift-machine-config-operator/machine-config-server-hfzxd" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.865875 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/03b7032d-fb47-4f5c-95b8-f69f58b65db7-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-6r57j\" (UID: \"03b7032d-fb47-4f5c-95b8-f69f58b65db7\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6r57j" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.865908 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-audit-dir\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.865940 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxfpx\" (UniqueName: \"kubernetes.io/projected/7c6775c9-9034-4be4-8002-201a25a35eab-kube-api-access-bxfpx\") pod \"route-controller-manager-6576b87f9c-rczbk\" (UID: \"7c6775c9-9034-4be4-8002-201a25a35eab\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.865972 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4b967e25-f884-4c9b-8307-cd4b669bbf76-auth-proxy-config\") pod \"machine-approver-56656f9798-wwdf6\" (UID: \"4b967e25-f884-4c9b-8307-cd4b669bbf76\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wwdf6" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.866078 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ngdrh\" (UniqueName: \"kubernetes.io/projected/4b967e25-f884-4c9b-8307-cd4b669bbf76-kube-api-access-ngdrh\") pod \"machine-approver-56656f9798-wwdf6\" (UID: \"4b967e25-f884-4c9b-8307-cd4b669bbf76\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wwdf6" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.866112 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f44df15c-1cd2-44eb-a4ac-efca1d06d3b3-trusted-ca\") pod 
\"cluster-image-registry-operator-dc59b4c8b-5svll\" (UID: \"f44df15c-1cd2-44eb-a4ac-efca1d06d3b3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5svll" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.866142 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-oauth-serving-cert\") pod \"console-f9d7485db-hb87p\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.866143 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e88532-9a76-44df-b084-bfd9c96457c7-config\") pod \"console-operator-58897d9998-7gs6x\" (UID: \"e7e88532-9a76-44df-b084-bfd9c96457c7\") " pod="openshift-console-operator/console-operator-58897d9998-7gs6x" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.866171 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fbe81302-7306-4254-b54b-a91b118197a3-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-kjwh6\" (UID: \"fbe81302-7306-4254-b54b-a91b118197a3\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjwh6" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.866220 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drxnk\" (UniqueName: \"kubernetes.io/projected/e7e88532-9a76-44df-b084-bfd9c96457c7-kube-api-access-drxnk\") pod \"console-operator-58897d9998-7gs6x\" (UID: \"e7e88532-9a76-44df-b084-bfd9c96457c7\") " pod="openshift-console-operator/console-operator-58897d9998-7gs6x" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.866253 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95-proxy-tls\") pod \"machine-config-operator-74547568cd-297f4\" (UID: \"a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-297f4" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.866288 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3ffa275a-62dc-46f6-ae70-34b5758d918e-ca-trust-extracted\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.866297 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-audit-dir\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.866323 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc 
kubenswrapper[4751]: I0227 16:27:33.866354 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6efd7ce0-9b49-4c51-accf-3efcfc1188e6-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-mw4mn\" (UID: \"6efd7ce0-9b49-4c51-accf-3efcfc1188e6\") " pod="openshift-marketplace/marketplace-operator-79b997595-mw4mn" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.866391 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bmcq7\" (UniqueName: \"kubernetes.io/projected/2680126d-1cf3-4cbd-a130-3d8d0070a394-kube-api-access-bmcq7\") pod \"collect-profiles-29536815-fc4ph\" (UID: \"2680126d-1cf3-4cbd-a130-3d8d0070a394\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536815-fc4ph" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.866499 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-audit-policies\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.866541 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/89171c06-8a67-420c-bb2e-0608ceb22697-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-h46g5\" (UID: \"89171c06-8a67-420c-bb2e-0608ceb22697\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h46g5" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.866669 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/50c130e9-87b3-43aa-a620-8faefd1add54-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-sk82j\" (UID: \"50c130e9-87b3-43aa-a620-8faefd1add54\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sk82j" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.866060 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7c6775c9-9034-4be4-8002-201a25a35eab-client-ca\") pod \"route-controller-manager-6576b87f9c-rczbk\" (UID: \"7c6775c9-9034-4be4-8002-201a25a35eab\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.866705 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95-auth-proxy-config\") pod \"machine-config-operator-74547568cd-297f4\" (UID: \"a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-297f4" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.866739 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-audit\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 
crc kubenswrapper[4751]: I0227 16:27:33.866776 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/51bdc38f-9b69-437f-9a17-edb963fb01ab-service-ca-bundle\") pod \"router-default-5444994796-hsqjr\" (UID: \"51bdc38f-9b69-437f-9a17-edb963fb01ab\") " pod="openshift-ingress/router-default-5444994796-hsqjr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.866808 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.866840 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8de82997-0daf-469d-ba5e-23fcaaa04614-client-ca\") pod \"controller-manager-879f6c89f-htn5q\" (UID: \"8de82997-0daf-469d-ba5e-23fcaaa04614\") " pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.866873 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzp6x\" (UniqueName: \"kubernetes.io/projected/89171c06-8a67-420c-bb2e-0608ceb22697-kube-api-access-tzp6x\") pod \"package-server-manager-789f6589d5-h46g5\" (UID: \"89171c06-8a67-420c-bb2e-0608ceb22697\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h46g5" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.866904 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/d474cfdd-550b-471d-aea1-6bc3f8532fa5-csi-data-dir\") pod \"csi-hostpathplugin-qs2km\" (UID: \"d474cfdd-550b-471d-aea1-6bc3f8532fa5\") " pod="hostpath-provisioner/csi-hostpathplugin-qs2km" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.867133 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4b967e25-f884-4c9b-8307-cd4b669bbf76-auth-proxy-config\") pod \"machine-approver-56656f9798-wwdf6\" (UID: \"4b967e25-f884-4c9b-8307-cd4b669bbf76\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wwdf6" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.867174 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-image-import-ca\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.867525 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-oauth-serving-cert\") pod \"console-f9d7485db-hb87p\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.867739 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/fbe81302-7306-4254-b54b-a91b118197a3-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-kjwh6\" (UID: \"fbe81302-7306-4254-b54b-a91b118197a3\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjwh6" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.868195 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-audit-policies\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.868543 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/51bdc38f-9b69-437f-9a17-edb963fb01ab-default-certificate\") pod \"router-default-5444994796-hsqjr\" (UID: \"51bdc38f-9b69-437f-9a17-edb963fb01ab\") " pod="openshift-ingress/router-default-5444994796-hsqjr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.866675 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8de82997-0daf-469d-ba5e-23fcaaa04614-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-htn5q\" (UID: \"8de82997-0daf-469d-ba5e-23fcaaa04614\") " pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.868647 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f44df15c-1cd2-44eb-a4ac-efca1d06d3b3-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-5svll\" (UID: \"f44df15c-1cd2-44eb-a4ac-efca1d06d3b3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5svll" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.868809 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8de82997-0daf-469d-ba5e-23fcaaa04614-config\") pod \"controller-manager-879f6c89f-htn5q\" (UID: \"8de82997-0daf-469d-ba5e-23fcaaa04614\") " pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.869461 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3ffa275a-62dc-46f6-ae70-34b5758d918e-ca-trust-extracted\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.869638 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/714d38ad-6a1e-4d9d-98ce-22a8582a6429-etcd-service-ca\") pod \"etcd-operator-b45778765-8fhbk\" (UID: \"714d38ad-6a1e-4d9d-98ce-22a8582a6429\") " pod="openshift-etcd-operator/etcd-operator-b45778765-8fhbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.869747 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/714d38ad-6a1e-4d9d-98ce-22a8582a6429-config\") pod \"etcd-operator-b45778765-8fhbk\" (UID: \"714d38ad-6a1e-4d9d-98ce-22a8582a6429\") " pod="openshift-etcd-operator/etcd-operator-b45778765-8fhbk" Feb 27 16:27:33 crc 
kubenswrapper[4751]: I0227 16:27:33.869757 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3ffa275a-62dc-46f6-ae70-34b5758d918e-registry-certificates\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.869787 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.869823 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e7e88532-9a76-44df-b084-bfd9c96457c7-trusted-ca\") pod \"console-operator-58897d9998-7gs6x\" (UID: \"e7e88532-9a76-44df-b084-bfd9c96457c7\") " pod="openshift-console-operator/console-operator-58897d9998-7gs6x" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.869849 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8de82997-0daf-469d-ba5e-23fcaaa04614-client-ca\") pod \"controller-manager-879f6c89f-htn5q\" (UID: \"8de82997-0daf-469d-ba5e-23fcaaa04614\") " pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.869876 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/d474cfdd-550b-471d-aea1-6bc3f8532fa5-socket-dir\") pod \"csi-hostpathplugin-qs2km\" (UID: \"d474cfdd-550b-471d-aea1-6bc3f8532fa5\") " pod="hostpath-provisioner/csi-hostpathplugin-qs2km" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.870041 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/51bdc38f-9b69-437f-9a17-edb963fb01ab-stats-auth\") pod \"router-default-5444994796-hsqjr\" (UID: \"51bdc38f-9b69-437f-9a17-edb963fb01ab\") " pod="openshift-ingress/router-default-5444994796-hsqjr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.870042 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.870104 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/714d38ad-6a1e-4d9d-98ce-22a8582a6429-serving-cert\") pod \"etcd-operator-b45778765-8fhbk\" (UID: \"714d38ad-6a1e-4d9d-98ce-22a8582a6429\") " pod="openshift-etcd-operator/etcd-operator-b45778765-8fhbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.870133 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9592t\" (UniqueName: 
\"kubernetes.io/projected/ef47b0ab-908a-4d99-9517-32a5984070fb-kube-api-access-9592t\") pod \"openshift-config-operator-7777fb866f-2c2zk\" (UID: \"ef47b0ab-908a-4d99-9517-32a5984070fb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2c2zk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.870306 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a74a9206-b910-48c2-8448-29baf6140688-service-ca-bundle\") pod \"authentication-operator-69f744f599-txmh7\" (UID: \"a74a9206-b910-48c2-8448-29baf6140688\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-txmh7" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.870472 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3ffa275a-62dc-46f6-ae70-34b5758d918e-trusted-ca\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.870514 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3ffa275a-62dc-46f6-ae70-34b5758d918e-registry-tls\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.870556 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2680126d-1cf3-4cbd-a130-3d8d0070a394-secret-volume\") pod \"collect-profiles-29536815-fc4ph\" (UID: \"2680126d-1cf3-4cbd-a130-3d8d0070a394\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536815-fc4ph" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.870642 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-audit\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.871011 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.871102 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e88532-9a76-44df-b084-bfd9c96457c7-serving-cert\") pod \"console-operator-58897d9998-7gs6x\" (UID: \"e7e88532-9a76-44df-b084-bfd9c96457c7\") " pod="openshift-console-operator/console-operator-58897d9998-7gs6x" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.871115 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3ffa275a-62dc-46f6-ae70-34b5758d918e-registry-certificates\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.871163 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/59eee97a-55e1-4400-8f57-ab0781947114-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-blvwj\" (UID: \"59eee97a-55e1-4400-8f57-ab0781947114\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-blvwj" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.871196 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-config\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.871300 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-etcd-serving-ca\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.871495 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wblv\" (UniqueName: \"kubernetes.io/projected/ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3-kube-api-access-4wblv\") pod \"machine-api-operator-5694c8668f-7hctb\" (UID: \"ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7hctb" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.871608 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a74a9206-b910-48c2-8448-29baf6140688-serving-cert\") pod \"authentication-operator-69f744f599-txmh7\" (UID: \"a74a9206-b910-48c2-8448-29baf6140688\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-txmh7" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.871673 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebd19e25-96f6-454f-9472-d504d15a4821-config\") pod \"service-ca-operator-777779d784-jxspb\" (UID: \"ebd19e25-96f6-454f-9472-d504d15a4821\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jxspb" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.871725 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/d474cfdd-550b-471d-aea1-6bc3f8532fa5-plugins-dir\") pod \"csi-hostpathplugin-qs2km\" (UID: \"d474cfdd-550b-471d-aea1-6bc3f8532fa5\") " pod="hostpath-provisioner/csi-hostpathplugin-qs2km" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.871781 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/51bdc38f-9b69-437f-9a17-edb963fb01ab-metrics-certs\") pod \"router-default-5444994796-hsqjr\" (UID: \"51bdc38f-9b69-437f-9a17-edb963fb01ab\") " pod="openshift-ingress/router-default-5444994796-hsqjr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.871922 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3-config\") pod \"machine-api-operator-5694c8668f-7hctb\" (UID: \"ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7hctb" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.871961 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e7e88532-9a76-44df-b084-bfd9c96457c7-trusted-ca\") pod \"console-operator-58897d9998-7gs6x\" (UID: \"e7e88532-9a76-44df-b084-bfd9c96457c7\") " pod="openshift-console-operator/console-operator-58897d9998-7gs6x" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.871968 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-trusted-ca-bundle\") pod \"console-f9d7485db-hb87p\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.872089 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/daf0456f-25d3-4dee-8eac-49f0056aa251-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-p4n6h\" (UID: \"daf0456f-25d3-4dee-8eac-49f0056aa251\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-p4n6h" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.871980 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-config\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.872153 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-etcd-serving-ca\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.872198 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2680126d-1cf3-4cbd-a130-3d8d0070a394-config-volume\") pod \"collect-profiles-29536815-fc4ph\" (UID: \"2680126d-1cf3-4cbd-a130-3d8d0070a394\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536815-fc4ph" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.872301 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-encryption-config\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.872486 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/4b967e25-f884-4c9b-8307-cd4b669bbf76-machine-approver-tls\") pod \"machine-approver-56656f9798-wwdf6\" (UID: \"4b967e25-f884-4c9b-8307-cd4b669bbf76\") " 
pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wwdf6" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.872674 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-console-config\") pod \"console-f9d7485db-hb87p\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.872739 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8de82997-0daf-469d-ba5e-23fcaaa04614-config\") pod \"controller-manager-879f6c89f-htn5q\" (UID: \"8de82997-0daf-469d-ba5e-23fcaaa04614\") " pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.872738 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e03572cc-cf26-4ed2-bb88-ae6d3150b904-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-r87pw\" (UID: \"e03572cc-cf26-4ed2-bb88-ae6d3150b904\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-r87pw" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.872839 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95-images\") pod \"machine-config-operator-74547568cd-297f4\" (UID: \"a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-297f4" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.872901 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3ffa275a-62dc-46f6-ae70-34b5758d918e-installation-pull-secrets\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.872942 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/714d38ad-6a1e-4d9d-98ce-22a8582a6429-etcd-client\") pod \"etcd-operator-b45778765-8fhbk\" (UID: \"714d38ad-6a1e-4d9d-98ce-22a8582a6429\") " pod="openshift-etcd-operator/etcd-operator-b45778765-8fhbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.873064 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7vxx\" (UniqueName: \"kubernetes.io/projected/ea0878fe-ac62-445d-87c1-248128612682-kube-api-access-c7vxx\") pod \"service-ca-9c57cc56f-j5z2q\" (UID: \"ea0878fe-ac62-445d-87c1-248128612682\") " pod="openshift-service-ca/service-ca-9c57cc56f-j5z2q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.873115 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wbfvf\" (UniqueName: \"kubernetes.io/projected/520e0111-144a-404b-8dee-61fb546bc717-kube-api-access-wbfvf\") pod \"migrator-59844c95c7-vnzzq\" (UID: \"520e0111-144a-404b-8dee-61fb546bc717\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vnzzq" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.873121 4751 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3-config\") pod \"machine-api-operator-5694c8668f-7hctb\" (UID: \"ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7hctb" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.873155 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/10b563d1-9d53-4755-9b40-4f907b6ea224-webhook-cert\") pod \"packageserver-d55dfcdfc-hn7fm\" (UID: \"10b563d1-9d53-4755-9b40-4f907b6ea224\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn7fm" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.873536 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.873629 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/14cd3902-6c39-409b-88f9-ddb6a23bc450-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-sblbf\" (UID: \"14cd3902-6c39-409b-88f9-ddb6a23bc450\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-sblbf" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.873763 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-console-serving-cert\") pod \"console-f9d7485db-hb87p\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.873864 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkt8f\" (UniqueName: \"kubernetes.io/projected/578d81ae-01aa-4cc7-bdee-de283490661d-kube-api-access-xkt8f\") pod \"olm-operator-6b444d44fb-k4pxk\" (UID: \"578d81ae-01aa-4cc7-bdee-de283490661d\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k4pxk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.873903 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ef47b0ab-908a-4d99-9517-32a5984070fb-serving-cert\") pod \"openshift-config-operator-7777fb866f-2c2zk\" (UID: \"ef47b0ab-908a-4d99-9517-32a5984070fb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2c2zk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.873937 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4svm\" (UniqueName: \"kubernetes.io/projected/3ffa275a-62dc-46f6-ae70-34b5758d918e-kube-api-access-v4svm\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.873971 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3ffa275a-62dc-46f6-ae70-34b5758d918e-trusted-ca\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: 
\"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.873982 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-trusted-ca-bundle\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.874137 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xc9cw\" (UniqueName: \"kubernetes.io/projected/51bdc38f-9b69-437f-9a17-edb963fb01ab-kube-api-access-xc9cw\") pod \"router-default-5444994796-hsqjr\" (UID: \"51bdc38f-9b69-437f-9a17-edb963fb01ab\") " pod="openshift-ingress/router-default-5444994796-hsqjr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.874231 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.874293 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-console-oauth-config\") pod \"console-f9d7485db-hb87p\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.874581 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-etcd-client\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.874667 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7v5t\" (UniqueName: \"kubernetes.io/projected/a74a9206-b910-48c2-8448-29baf6140688-kube-api-access-r7v5t\") pod \"authentication-operator-69f744f599-txmh7\" (UID: \"a74a9206-b910-48c2-8448-29baf6140688\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-txmh7" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.874746 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03b7032d-fb47-4f5c-95b8-f69f58b65db7-config\") pod \"kube-controller-manager-operator-78b949d7b-6r57j\" (UID: \"03b7032d-fb47-4f5c-95b8-f69f58b65db7\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6r57j" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.874949 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: 
I0227 16:27:33.875082 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ddcb9142-ff54-44b3-bb37-0e4b103c407e-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vrmbr\" (UID: \"ddcb9142-ff54-44b3-bb37-0e4b103c407e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vrmbr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.875114 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/92ccd256-d5a2-4857-8acb-f11bc462ac9c-srv-cert\") pod \"catalog-operator-68c6474976-bfl2l\" (UID: \"92ccd256-d5a2-4857-8acb-f11bc462ac9c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bfl2l" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.875212 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-console-config\") pod \"console-f9d7485db-hb87p\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.875346 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c6775c9-9034-4be4-8002-201a25a35eab-serving-cert\") pod \"route-controller-manager-6576b87f9c-rczbk\" (UID: \"7c6775c9-9034-4be4-8002-201a25a35eab\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.875381 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8de82997-0daf-469d-ba5e-23fcaaa04614-serving-cert\") pod \"controller-manager-879f6c89f-htn5q\" (UID: \"8de82997-0daf-469d-ba5e-23fcaaa04614\") " pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.875431 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.875466 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-7hctb\" (UID: \"ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7hctb" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.874678 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.875598 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-62rln\" (UniqueName: \"kubernetes.io/projected/10b563d1-9d53-4755-9b40-4f907b6ea224-kube-api-access-62rln\") pod \"packageserver-d55dfcdfc-hn7fm\" (UID: \"10b563d1-9d53-4755-9b40-4f907b6ea224\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn7fm" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.875655 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6efd7ce0-9b49-4c51-accf-3efcfc1188e6-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-mw4mn\" (UID: \"6efd7ce0-9b49-4c51-accf-3efcfc1188e6\") " pod="openshift-marketplace/marketplace-operator-79b997595-mw4mn" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.875724 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-serving-cert\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.875821 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a74a9206-b910-48c2-8448-29baf6140688-config\") pod \"authentication-operator-69f744f599-txmh7\" (UID: \"a74a9206-b910-48c2-8448-29baf6140688\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-txmh7" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.875857 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/526f8eee-e82b-4a1b-93ff-46a732856a7c-cert\") pod \"ingress-canary-4848q\" (UID: \"526f8eee-e82b-4a1b-93ff-46a732856a7c\") " pod="openshift-ingress-canary/ingress-canary-4848q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.875890 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/92ccd256-d5a2-4857-8acb-f11bc462ac9c-profile-collector-cert\") pod \"catalog-operator-68c6474976-bfl2l\" (UID: \"92ccd256-d5a2-4857-8acb-f11bc462ac9c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bfl2l" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.875936 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thght\" (UniqueName: \"kubernetes.io/projected/ebd19e25-96f6-454f-9472-d504d15a4821-kube-api-access-thght\") pod \"service-ca-operator-777779d784-jxspb\" (UID: \"ebd19e25-96f6-454f-9472-d504d15a4821\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jxspb" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.875967 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x75dp\" (UniqueName: \"kubernetes.io/projected/7b0a19d1-3c2c-444a-8b89-b417e7324b78-kube-api-access-x75dp\") pod \"machine-config-server-hfzxd\" (UID: \"7b0a19d1-3c2c-444a-8b89-b417e7324b78\") " pod="openshift-machine-config-operator/machine-config-server-hfzxd" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.875998 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j84rz\" (UniqueName: 
\"kubernetes.io/projected/50c130e9-87b3-43aa-a620-8faefd1add54-kube-api-access-j84rz\") pod \"machine-config-controller-84d6567774-sk82j\" (UID: \"50c130e9-87b3-43aa-a620-8faefd1add54\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sk82j" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.876029 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/7b0a19d1-3c2c-444a-8b89-b417e7324b78-certs\") pod \"machine-config-server-hfzxd\" (UID: \"7b0a19d1-3c2c-444a-8b89-b417e7324b78\") " pod="openshift-machine-config-operator/machine-config-server-hfzxd" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.876144 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b967e25-f884-4c9b-8307-cd4b669bbf76-config\") pod \"machine-approver-56656f9798-wwdf6\" (UID: \"4b967e25-f884-4c9b-8307-cd4b669bbf76\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wwdf6" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.876179 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.876217 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppbrc\" (UniqueName: \"kubernetes.io/projected/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-kube-api-access-ppbrc\") pod \"console-f9d7485db-hb87p\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.876255 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7c1d7708-439d-4d51-ab68-0a2058480646-config-volume\") pod \"dns-default-vf5cg\" (UID: \"7c1d7708-439d-4d51-ab68-0a2058480646\") " pod="openshift-dns/dns-default-vf5cg" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.876288 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/d474cfdd-550b-471d-aea1-6bc3f8532fa5-mountpoint-dir\") pod \"csi-hostpathplugin-qs2km\" (UID: \"d474cfdd-550b-471d-aea1-6bc3f8532fa5\") " pod="hostpath-provisioner/csi-hostpathplugin-qs2km" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.876324 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/714d38ad-6a1e-4d9d-98ce-22a8582a6429-etcd-ca\") pod \"etcd-operator-b45778765-8fhbk\" (UID: \"714d38ad-6a1e-4d9d-98ce-22a8582a6429\") " pod="openshift-etcd-operator/etcd-operator-b45778765-8fhbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.876360 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxwqn\" (UniqueName: \"kubernetes.io/projected/fbe81302-7306-4254-b54b-a91b118197a3-kube-api-access-pxwqn\") pod \"openshift-controller-manager-operator-756b6f6bc6-kjwh6\" (UID: \"fbe81302-7306-4254-b54b-a91b118197a3\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjwh6" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.876394 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.876456 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/d474cfdd-550b-471d-aea1-6bc3f8532fa5-registration-dir\") pod \"csi-hostpathplugin-qs2km\" (UID: \"d474cfdd-550b-471d-aea1-6bc3f8532fa5\") " pod="hostpath-provisioner/csi-hostpathplugin-qs2km" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.876455 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-trusted-ca-bundle\") pod \"console-f9d7485db-hb87p\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.876835 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/714d38ad-6a1e-4d9d-98ce-22a8582a6429-serving-cert\") pod \"etcd-operator-b45778765-8fhbk\" (UID: \"714d38ad-6a1e-4d9d-98ce-22a8582a6429\") " pod="openshift-etcd-operator/etcd-operator-b45778765-8fhbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.876986 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b967e25-f884-4c9b-8307-cd4b669bbf76-config\") pod \"machine-approver-56656f9798-wwdf6\" (UID: \"4b967e25-f884-4c9b-8307-cd4b669bbf76\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wwdf6" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.877577 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.877973 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/714d38ad-6a1e-4d9d-98ce-22a8582a6429-etcd-ca\") pod \"etcd-operator-b45778765-8fhbk\" (UID: \"714d38ad-6a1e-4d9d-98ce-22a8582a6429\") " pod="openshift-etcd-operator/etcd-operator-b45778765-8fhbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.878966 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-trusted-ca-bundle\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.879121 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: 
\"kubernetes.io/secret/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-encryption-config\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.880197 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8de82997-0daf-469d-ba5e-23fcaaa04614-serving-cert\") pod \"controller-manager-879f6c89f-htn5q\" (UID: \"8de82997-0daf-469d-ba5e-23fcaaa04614\") " pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.880249 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/14cd3902-6c39-409b-88f9-ddb6a23bc450-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-sblbf\" (UID: \"14cd3902-6c39-409b-88f9-ddb6a23bc450\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-sblbf" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.880326 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/f44df15c-1cd2-44eb-a4ac-efca1d06d3b3-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-5svll\" (UID: \"f44df15c-1cd2-44eb-a4ac-efca1d06d3b3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5svll" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.880794 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/51bdc38f-9b69-437f-9a17-edb963fb01ab-service-ca-bundle\") pod \"router-default-5444994796-hsqjr\" (UID: \"51bdc38f-9b69-437f-9a17-edb963fb01ab\") " pod="openshift-ingress/router-default-5444994796-hsqjr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.880882 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fbe81302-7306-4254-b54b-a91b118197a3-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-kjwh6\" (UID: \"fbe81302-7306-4254-b54b-a91b118197a3\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjwh6" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.881464 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zjprp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.882431 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c6775c9-9034-4be4-8002-201a25a35eab-serving-cert\") pod \"route-controller-manager-6576b87f9c-rczbk\" (UID: \"7c6775c9-9034-4be4-8002-201a25a35eab\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.883313 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-console-serving-cert\") pod \"console-f9d7485db-hb87p\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.883486 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-7hctb\" (UID: \"ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7hctb" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.884278 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.884810 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.884867 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/51bdc38f-9b69-437f-9a17-edb963fb01ab-stats-auth\") pod \"router-default-5444994796-hsqjr\" (UID: \"51bdc38f-9b69-437f-9a17-edb963fb01ab\") " pod="openshift-ingress/router-default-5444994796-hsqjr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.885054 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/51bdc38f-9b69-437f-9a17-edb963fb01ab-metrics-certs\") pod \"router-default-5444994796-hsqjr\" (UID: \"51bdc38f-9b69-437f-9a17-edb963fb01ab\") " pod="openshift-ingress/router-default-5444994796-hsqjr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.885077 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-console-oauth-config\") pod \"console-f9d7485db-hb87p\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.885894 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: 
\"kubernetes.io/secret/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-etcd-client\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.886378 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/714d38ad-6a1e-4d9d-98ce-22a8582a6429-etcd-client\") pod \"etcd-operator-b45778765-8fhbk\" (UID: \"714d38ad-6a1e-4d9d-98ce-22a8582a6429\") " pod="openshift-etcd-operator/etcd-operator-b45778765-8fhbk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.896119 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3ffa275a-62dc-46f6-ae70-34b5758d918e-installation-pull-secrets\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.896176 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f3483f04-316b-403b-9117-b744e8bc5c3f-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-gjlwd\" (UID: \"f3483f04-316b-403b-9117-b744e8bc5c3f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gjlwd" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.896641 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-fqng2" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.897980 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-serving-cert\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.898160 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3ffa275a-62dc-46f6-ae70-34b5758d918e-registry-tls\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.898279 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.906990 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e88532-9a76-44df-b084-bfd9c96457c7-serving-cert\") pod \"console-operator-58897d9998-7gs6x\" (UID: \"e7e88532-9a76-44df-b084-bfd9c96457c7\") " pod="openshift-console-operator/console-operator-58897d9998-7gs6x" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.911024 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/4b967e25-f884-4c9b-8307-cd4b669bbf76-machine-approver-tls\") pod 
\"machine-approver-56656f9798-wwdf6\" (UID: \"4b967e25-f884-4c9b-8307-cd4b669bbf76\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wwdf6" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.922133 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-grvjx\" (UniqueName: \"kubernetes.io/projected/8de82997-0daf-469d-ba5e-23fcaaa04614-kube-api-access-grvjx\") pod \"controller-manager-879f6c89f-htn5q\" (UID: \"8de82997-0daf-469d-ba5e-23fcaaa04614\") " pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.931028 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7l6z4\" (UniqueName: \"kubernetes.io/projected/ee5f53d5-2c38-465d-ad33-d5c0c5eb3923-kube-api-access-7l6z4\") pod \"downloads-7954f5f757-5f8sr\" (UID: \"ee5f53d5-2c38-465d-ad33-d5c0c5eb3923\") " pod="openshift-console/downloads-7954f5f757-5f8sr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.957663 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/14cd3902-6c39-409b-88f9-ddb6a23bc450-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-sblbf\" (UID: \"14cd3902-6c39-409b-88f9-ddb6a23bc450\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-sblbf" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.971343 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwmgj\" (UniqueName: \"kubernetes.io/projected/c3fb94cc-a0da-4ecb-b0ba-a4666e264e70-kube-api-access-hwmgj\") pod \"apiserver-76f77b778f-knf9q\" (UID: \"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70\") " pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.977106 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:33 crc kubenswrapper[4751]: E0227 16:27:33.977273 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:34.477241884 +0000 UTC m=+216.624256341 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.977393 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a74a9206-b910-48c2-8448-29baf6140688-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-txmh7\" (UID: \"a74a9206-b910-48c2-8448-29baf6140688\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-txmh7" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.977522 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/578d81ae-01aa-4cc7-bdee-de283490661d-srv-cert\") pod \"olm-operator-6b444d44fb-k4pxk\" (UID: \"578d81ae-01aa-4cc7-bdee-de283490661d\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k4pxk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.977567 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/50c130e9-87b3-43aa-a620-8faefd1add54-proxy-tls\") pod \"machine-config-controller-84d6567774-sk82j\" (UID: \"50c130e9-87b3-43aa-a620-8faefd1add54\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sk82j" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.977611 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7bgh\" (UniqueName: \"kubernetes.io/projected/11a526fe-64f1-4da8-a0e8-ed276ec069fb-kube-api-access-m7bgh\") pod \"auto-csr-approver-29536826-mxq7k\" (UID: \"11a526fe-64f1-4da8-a0e8-ed276ec069fb\") " pod="openshift-infra/auto-csr-approver-29536826-mxq7k" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.977641 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ff68n\" (UniqueName: \"kubernetes.io/projected/92ccd256-d5a2-4857-8acb-f11bc462ac9c-kube-api-access-ff68n\") pod \"catalog-operator-68c6474976-bfl2l\" (UID: \"92ccd256-d5a2-4857-8acb-f11bc462ac9c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bfl2l" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.977682 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebd19e25-96f6-454f-9472-d504d15a4821-serving-cert\") pod \"service-ca-operator-777779d784-jxspb\" (UID: \"ebd19e25-96f6-454f-9472-d504d15a4821\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jxspb" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.978046 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-629gj\" (UniqueName: \"kubernetes.io/projected/526f8eee-e82b-4a1b-93ff-46a732856a7c-kube-api-access-629gj\") pod \"ingress-canary-4848q\" (UID: \"526f8eee-e82b-4a1b-93ff-46a732856a7c\") " pod="openshift-ingress-canary/ingress-canary-4848q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.978713 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-xf5zk\" (UniqueName: \"kubernetes.io/projected/59eee97a-55e1-4400-8f57-ab0781947114-kube-api-access-xf5zk\") pod \"openshift-apiserver-operator-796bbdcf4f-blvwj\" (UID: \"59eee97a-55e1-4400-8f57-ab0781947114\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-blvwj" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.978792 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59eee97a-55e1-4400-8f57-ab0781947114-config\") pod \"openshift-apiserver-operator-796bbdcf4f-blvwj\" (UID: \"59eee97a-55e1-4400-8f57-ab0781947114\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-blvwj" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.978829 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xjpr\" (UniqueName: \"kubernetes.io/projected/a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95-kube-api-access-2xjpr\") pod \"machine-config-operator-74547568cd-297f4\" (UID: \"a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-297f4" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.978889 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.978973 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/ea0878fe-ac62-445d-87c1-248128612682-signing-key\") pod \"service-ca-9c57cc56f-j5z2q\" (UID: \"ea0878fe-ac62-445d-87c1-248128612682\") " pod="openshift-service-ca/service-ca-9c57cc56f-j5z2q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.979006 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7c1d7708-439d-4d51-ab68-0a2058480646-metrics-tls\") pod \"dns-default-vf5cg\" (UID: \"7c1d7708-439d-4d51-ab68-0a2058480646\") " pod="openshift-dns/dns-default-vf5cg" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.979043 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ddcb9142-ff54-44b3-bb37-0e4b103c407e-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vrmbr\" (UID: \"ddcb9142-ff54-44b3-bb37-0e4b103c407e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vrmbr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.979078 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/4155e5dc-eb83-4d58-bb2b-554fcbda2e8c-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-rjqrq\" (UID: \"4155e5dc-eb83-4d58-bb2b-554fcbda2e8c\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rjqrq" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.979125 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: 
\"kubernetes.io/empty-dir/10b563d1-9d53-4755-9b40-4f907b6ea224-tmpfs\") pod \"packageserver-d55dfcdfc-hn7fm\" (UID: \"10b563d1-9d53-4755-9b40-4f907b6ea224\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn7fm" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.979155 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/10b563d1-9d53-4755-9b40-4f907b6ea224-apiservice-cert\") pod \"packageserver-d55dfcdfc-hn7fm\" (UID: \"10b563d1-9d53-4755-9b40-4f907b6ea224\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn7fm" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.979184 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/578d81ae-01aa-4cc7-bdee-de283490661d-profile-collector-cert\") pod \"olm-operator-6b444d44fb-k4pxk\" (UID: \"578d81ae-01aa-4cc7-bdee-de283490661d\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k4pxk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.979217 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmnjn\" (UniqueName: \"kubernetes.io/projected/d474cfdd-550b-471d-aea1-6bc3f8532fa5-kube-api-access-mmnjn\") pod \"csi-hostpathplugin-qs2km\" (UID: \"d474cfdd-550b-471d-aea1-6bc3f8532fa5\") " pod="hostpath-provisioner/csi-hostpathplugin-qs2km" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.979250 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69xtc\" (UniqueName: \"kubernetes.io/projected/6efd7ce0-9b49-4c51-accf-3efcfc1188e6-kube-api-access-69xtc\") pod \"marketplace-operator-79b997595-mw4mn\" (UID: \"6efd7ce0-9b49-4c51-accf-3efcfc1188e6\") " pod="openshift-marketplace/marketplace-operator-79b997595-mw4mn" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.979292 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ddcb9142-ff54-44b3-bb37-0e4b103c407e-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vrmbr\" (UID: \"ddcb9142-ff54-44b3-bb37-0e4b103c407e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vrmbr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.979356 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mc52\" (UniqueName: \"kubernetes.io/projected/4155e5dc-eb83-4d58-bb2b-554fcbda2e8c-kube-api-access-7mc52\") pod \"control-plane-machine-set-operator-78cbb6b69f-rjqrq\" (UID: \"4155e5dc-eb83-4d58-bb2b-554fcbda2e8c\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rjqrq" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.979389 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/daf0456f-25d3-4dee-8eac-49f0056aa251-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-p4n6h\" (UID: \"daf0456f-25d3-4dee-8eac-49f0056aa251\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-p4n6h" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.979452 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: 
\"kubernetes.io/configmap/ea0878fe-ac62-445d-87c1-248128612682-signing-cabundle\") pod \"service-ca-9c57cc56f-j5z2q\" (UID: \"ea0878fe-ac62-445d-87c1-248128612682\") " pod="openshift-service-ca/service-ca-9c57cc56f-j5z2q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.979486 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rllc\" (UniqueName: \"kubernetes.io/projected/7c1d7708-439d-4d51-ab68-0a2058480646-kube-api-access-2rllc\") pod \"dns-default-vf5cg\" (UID: \"7c1d7708-439d-4d51-ab68-0a2058480646\") " pod="openshift-dns/dns-default-vf5cg" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.979518 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/7b0a19d1-3c2c-444a-8b89-b417e7324b78-node-bootstrap-token\") pod \"machine-config-server-hfzxd\" (UID: \"7b0a19d1-3c2c-444a-8b89-b417e7324b78\") " pod="openshift-machine-config-operator/machine-config-server-hfzxd" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.979549 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/03b7032d-fb47-4f5c-95b8-f69f58b65db7-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-6r57j\" (UID: \"03b7032d-fb47-4f5c-95b8-f69f58b65db7\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6r57j" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.979634 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95-proxy-tls\") pod \"machine-config-operator-74547568cd-297f4\" (UID: \"a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-297f4" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.979695 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6efd7ce0-9b49-4c51-accf-3efcfc1188e6-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-mw4mn\" (UID: \"6efd7ce0-9b49-4c51-accf-3efcfc1188e6\") " pod="openshift-marketplace/marketplace-operator-79b997595-mw4mn" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.979732 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bmcq7\" (UniqueName: \"kubernetes.io/projected/2680126d-1cf3-4cbd-a130-3d8d0070a394-kube-api-access-bmcq7\") pod \"collect-profiles-29536815-fc4ph\" (UID: \"2680126d-1cf3-4cbd-a130-3d8d0070a394\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536815-fc4ph" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.979774 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/89171c06-8a67-420c-bb2e-0608ceb22697-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-h46g5\" (UID: \"89171c06-8a67-420c-bb2e-0608ceb22697\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h46g5" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.979818 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/50c130e9-87b3-43aa-a620-8faefd1add54-mcc-auth-proxy-config\") pod 
\"machine-config-controller-84d6567774-sk82j\" (UID: \"50c130e9-87b3-43aa-a620-8faefd1add54\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sk82j" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.979861 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzp6x\" (UniqueName: \"kubernetes.io/projected/89171c06-8a67-420c-bb2e-0608ceb22697-kube-api-access-tzp6x\") pod \"package-server-manager-789f6589d5-h46g5\" (UID: \"89171c06-8a67-420c-bb2e-0608ceb22697\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h46g5" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.979891 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/d474cfdd-550b-471d-aea1-6bc3f8532fa5-csi-data-dir\") pod \"csi-hostpathplugin-qs2km\" (UID: \"d474cfdd-550b-471d-aea1-6bc3f8532fa5\") " pod="hostpath-provisioner/csi-hostpathplugin-qs2km" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.979925 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95-auth-proxy-config\") pod \"machine-config-operator-74547568cd-297f4\" (UID: \"a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-297f4" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.980963 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a74a9206-b910-48c2-8448-29baf6140688-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-txmh7\" (UID: \"a74a9206-b910-48c2-8448-29baf6140688\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-txmh7" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.981790 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebd19e25-96f6-454f-9472-d504d15a4821-serving-cert\") pod \"service-ca-operator-777779d784-jxspb\" (UID: \"ebd19e25-96f6-454f-9472-d504d15a4821\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jxspb" Feb 27 16:27:33 crc kubenswrapper[4751]: E0227 16:27:33.982078 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:34.482063071 +0000 UTC m=+216.629077628 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.982202 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/50c130e9-87b3-43aa-a620-8faefd1add54-proxy-tls\") pod \"machine-config-controller-84d6567774-sk82j\" (UID: \"50c130e9-87b3-43aa-a620-8faefd1add54\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sk82j" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.983534 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59eee97a-55e1-4400-8f57-ab0781947114-config\") pod \"openshift-apiserver-operator-796bbdcf4f-blvwj\" (UID: \"59eee97a-55e1-4400-8f57-ab0781947114\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-blvwj" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.984160 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a74a9206-b910-48c2-8448-29baf6140688-service-ca-bundle\") pod \"authentication-operator-69f744f599-txmh7\" (UID: \"a74a9206-b910-48c2-8448-29baf6140688\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-txmh7" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.984203 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/d474cfdd-550b-471d-aea1-6bc3f8532fa5-socket-dir\") pod \"csi-hostpathplugin-qs2km\" (UID: \"d474cfdd-550b-471d-aea1-6bc3f8532fa5\") " pod="hostpath-provisioner/csi-hostpathplugin-qs2km" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.984257 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2680126d-1cf3-4cbd-a130-3d8d0070a394-secret-volume\") pod \"collect-profiles-29536815-fc4ph\" (UID: \"2680126d-1cf3-4cbd-a130-3d8d0070a394\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536815-fc4ph" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.984292 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/59eee97a-55e1-4400-8f57-ab0781947114-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-blvwj\" (UID: \"59eee97a-55e1-4400-8f57-ab0781947114\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-blvwj" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.984334 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a74a9206-b910-48c2-8448-29baf6140688-serving-cert\") pod \"authentication-operator-69f744f599-txmh7\" (UID: \"a74a9206-b910-48c2-8448-29baf6140688\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-txmh7" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.984376 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/ebd19e25-96f6-454f-9472-d504d15a4821-config\") pod \"service-ca-operator-777779d784-jxspb\" (UID: \"ebd19e25-96f6-454f-9472-d504d15a4821\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jxspb" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.984450 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/d474cfdd-550b-471d-aea1-6bc3f8532fa5-plugins-dir\") pod \"csi-hostpathplugin-qs2km\" (UID: \"d474cfdd-550b-471d-aea1-6bc3f8532fa5\") " pod="hostpath-provisioner/csi-hostpathplugin-qs2km" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.984491 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/daf0456f-25d3-4dee-8eac-49f0056aa251-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-p4n6h\" (UID: \"daf0456f-25d3-4dee-8eac-49f0056aa251\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-p4n6h" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.984521 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2680126d-1cf3-4cbd-a130-3d8d0070a394-config-volume\") pod \"collect-profiles-29536815-fc4ph\" (UID: \"2680126d-1cf3-4cbd-a130-3d8d0070a394\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536815-fc4ph" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.984555 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e03572cc-cf26-4ed2-bb88-ae6d3150b904-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-r87pw\" (UID: \"e03572cc-cf26-4ed2-bb88-ae6d3150b904\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-r87pw" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.984586 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95-images\") pod \"machine-config-operator-74547568cd-297f4\" (UID: \"a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-297f4" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.984621 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7vxx\" (UniqueName: \"kubernetes.io/projected/ea0878fe-ac62-445d-87c1-248128612682-kube-api-access-c7vxx\") pod \"service-ca-9c57cc56f-j5z2q\" (UID: \"ea0878fe-ac62-445d-87c1-248128612682\") " pod="openshift-service-ca/service-ca-9c57cc56f-j5z2q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.984652 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wbfvf\" (UniqueName: \"kubernetes.io/projected/520e0111-144a-404b-8dee-61fb546bc717-kube-api-access-wbfvf\") pod \"migrator-59844c95c7-vnzzq\" (UID: \"520e0111-144a-404b-8dee-61fb546bc717\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vnzzq" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.984695 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/10b563d1-9d53-4755-9b40-4f907b6ea224-webhook-cert\") pod \"packageserver-d55dfcdfc-hn7fm\" (UID: \"10b563d1-9d53-4755-9b40-4f907b6ea224\") " 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn7fm" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.984734 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkt8f\" (UniqueName: \"kubernetes.io/projected/578d81ae-01aa-4cc7-bdee-de283490661d-kube-api-access-xkt8f\") pod \"olm-operator-6b444d44fb-k4pxk\" (UID: \"578d81ae-01aa-4cc7-bdee-de283490661d\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k4pxk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.984812 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7v5t\" (UniqueName: \"kubernetes.io/projected/a74a9206-b910-48c2-8448-29baf6140688-kube-api-access-r7v5t\") pod \"authentication-operator-69f744f599-txmh7\" (UID: \"a74a9206-b910-48c2-8448-29baf6140688\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-txmh7" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.984870 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03b7032d-fb47-4f5c-95b8-f69f58b65db7-config\") pod \"kube-controller-manager-operator-78b949d7b-6r57j\" (UID: \"03b7032d-fb47-4f5c-95b8-f69f58b65db7\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6r57j" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.984904 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/92ccd256-d5a2-4857-8acb-f11bc462ac9c-srv-cert\") pod \"catalog-operator-68c6474976-bfl2l\" (UID: \"92ccd256-d5a2-4857-8acb-f11bc462ac9c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bfl2l" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.984944 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ddcb9142-ff54-44b3-bb37-0e4b103c407e-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vrmbr\" (UID: \"ddcb9142-ff54-44b3-bb37-0e4b103c407e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vrmbr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.984979 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62rln\" (UniqueName: \"kubernetes.io/projected/10b563d1-9d53-4755-9b40-4f907b6ea224-kube-api-access-62rln\") pod \"packageserver-d55dfcdfc-hn7fm\" (UID: \"10b563d1-9d53-4755-9b40-4f907b6ea224\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn7fm" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.985011 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6efd7ce0-9b49-4c51-accf-3efcfc1188e6-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-mw4mn\" (UID: \"6efd7ce0-9b49-4c51-accf-3efcfc1188e6\") " pod="openshift-marketplace/marketplace-operator-79b997595-mw4mn" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.985044 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a74a9206-b910-48c2-8448-29baf6140688-config\") pod \"authentication-operator-69f744f599-txmh7\" (UID: \"a74a9206-b910-48c2-8448-29baf6140688\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-txmh7" Feb 27 
16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.985073 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/526f8eee-e82b-4a1b-93ff-46a732856a7c-cert\") pod \"ingress-canary-4848q\" (UID: \"526f8eee-e82b-4a1b-93ff-46a732856a7c\") " pod="openshift-ingress-canary/ingress-canary-4848q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.985103 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/92ccd256-d5a2-4857-8acb-f11bc462ac9c-profile-collector-cert\") pod \"catalog-operator-68c6474976-bfl2l\" (UID: \"92ccd256-d5a2-4857-8acb-f11bc462ac9c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bfl2l" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.985136 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x75dp\" (UniqueName: \"kubernetes.io/projected/7b0a19d1-3c2c-444a-8b89-b417e7324b78-kube-api-access-x75dp\") pod \"machine-config-server-hfzxd\" (UID: \"7b0a19d1-3c2c-444a-8b89-b417e7324b78\") " pod="openshift-machine-config-operator/machine-config-server-hfzxd" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.985168 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j84rz\" (UniqueName: \"kubernetes.io/projected/50c130e9-87b3-43aa-a620-8faefd1add54-kube-api-access-j84rz\") pod \"machine-config-controller-84d6567774-sk82j\" (UID: \"50c130e9-87b3-43aa-a620-8faefd1add54\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sk82j" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.985199 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thght\" (UniqueName: \"kubernetes.io/projected/ebd19e25-96f6-454f-9472-d504d15a4821-kube-api-access-thght\") pod \"service-ca-operator-777779d784-jxspb\" (UID: \"ebd19e25-96f6-454f-9472-d504d15a4821\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jxspb" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.985246 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7c1d7708-439d-4d51-ab68-0a2058480646-config-volume\") pod \"dns-default-vf5cg\" (UID: \"7c1d7708-439d-4d51-ab68-0a2058480646\") " pod="openshift-dns/dns-default-vf5cg" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.985275 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/7b0a19d1-3c2c-444a-8b89-b417e7324b78-certs\") pod \"machine-config-server-hfzxd\" (UID: \"7b0a19d1-3c2c-444a-8b89-b417e7324b78\") " pod="openshift-machine-config-operator/machine-config-server-hfzxd" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.985305 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/d474cfdd-550b-471d-aea1-6bc3f8532fa5-mountpoint-dir\") pod \"csi-hostpathplugin-qs2km\" (UID: \"d474cfdd-550b-471d-aea1-6bc3f8532fa5\") " pod="hostpath-provisioner/csi-hostpathplugin-qs2km" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.985348 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/d474cfdd-550b-471d-aea1-6bc3f8532fa5-registration-dir\") pod 
\"csi-hostpathplugin-qs2km\" (UID: \"d474cfdd-550b-471d-aea1-6bc3f8532fa5\") " pod="hostpath-provisioner/csi-hostpathplugin-qs2km" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.985396 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89hmp\" (UniqueName: \"kubernetes.io/projected/e03572cc-cf26-4ed2-bb88-ae6d3150b904-kube-api-access-89hmp\") pod \"multus-admission-controller-857f4d67dd-r87pw\" (UID: \"e03572cc-cf26-4ed2-bb88-ae6d3150b904\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-r87pw" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.985510 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/03b7032d-fb47-4f5c-95b8-f69f58b65db7-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-6r57j\" (UID: \"03b7032d-fb47-4f5c-95b8-f69f58b65db7\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6r57j" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.985547 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdrx8\" (UniqueName: \"kubernetes.io/projected/daf0456f-25d3-4dee-8eac-49f0056aa251-kube-api-access-wdrx8\") pod \"kube-storage-version-migrator-operator-b67b599dd-p4n6h\" (UID: \"daf0456f-25d3-4dee-8eac-49f0056aa251\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-p4n6h" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.987893 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/ea0878fe-ac62-445d-87c1-248128612682-signing-cabundle\") pod \"service-ca-9c57cc56f-j5z2q\" (UID: \"ea0878fe-ac62-445d-87c1-248128612682\") " pod="openshift-service-ca/service-ca-9c57cc56f-j5z2q" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.990691 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2680126d-1cf3-4cbd-a130-3d8d0070a394-config-volume\") pod \"collect-profiles-29536815-fc4ph\" (UID: \"2680126d-1cf3-4cbd-a130-3d8d0070a394\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536815-fc4ph" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.992934 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/d474cfdd-550b-471d-aea1-6bc3f8532fa5-csi-data-dir\") pod \"csi-hostpathplugin-qs2km\" (UID: \"d474cfdd-550b-471d-aea1-6bc3f8532fa5\") " pod="hostpath-provisioner/csi-hostpathplugin-qs2km" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.992991 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a74a9206-b910-48c2-8448-29baf6140688-serving-cert\") pod \"authentication-operator-69f744f599-txmh7\" (UID: \"a74a9206-b910-48c2-8448-29baf6140688\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-txmh7" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.993515 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebd19e25-96f6-454f-9472-d504d15a4821-config\") pod \"service-ca-operator-777779d784-jxspb\" (UID: \"ebd19e25-96f6-454f-9472-d504d15a4821\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jxspb" Feb 27 16:27:33 crc 
kubenswrapper[4751]: I0227 16:27:33.993517 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95-images\") pod \"machine-config-operator-74547568cd-297f4\" (UID: \"a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-297f4" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.993623 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/daf0456f-25d3-4dee-8eac-49f0056aa251-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-p4n6h\" (UID: \"daf0456f-25d3-4dee-8eac-49f0056aa251\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-p4n6h" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.993942 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/d474cfdd-550b-471d-aea1-6bc3f8532fa5-plugins-dir\") pod \"csi-hostpathplugin-qs2km\" (UID: \"d474cfdd-550b-471d-aea1-6bc3f8532fa5\") " pod="hostpath-provisioner/csi-hostpathplugin-qs2km" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.994326 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/578d81ae-01aa-4cc7-bdee-de283490661d-srv-cert\") pod \"olm-operator-6b444d44fb-k4pxk\" (UID: \"578d81ae-01aa-4cc7-bdee-de283490661d\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k4pxk" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.994524 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/daf0456f-25d3-4dee-8eac-49f0056aa251-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-p4n6h\" (UID: \"daf0456f-25d3-4dee-8eac-49f0056aa251\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-p4n6h" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.994712 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a74a9206-b910-48c2-8448-29baf6140688-service-ca-bundle\") pod \"authentication-operator-69f744f599-txmh7\" (UID: \"a74a9206-b910-48c2-8448-29baf6140688\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-txmh7" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.994768 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/d474cfdd-550b-471d-aea1-6bc3f8532fa5-socket-dir\") pod \"csi-hostpathplugin-qs2km\" (UID: \"d474cfdd-550b-471d-aea1-6bc3f8532fa5\") " pod="hostpath-provisioner/csi-hostpathplugin-qs2km" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.995229 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95-auth-proxy-config\") pod \"machine-config-operator-74547568cd-297f4\" (UID: \"a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-297f4" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.995332 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/59eee97a-55e1-4400-8f57-ab0781947114-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-blvwj\" (UID: \"59eee97a-55e1-4400-8f57-ab0781947114\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-blvwj" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.995647 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/10b563d1-9d53-4755-9b40-4f907b6ea224-webhook-cert\") pod \"packageserver-d55dfcdfc-hn7fm\" (UID: \"10b563d1-9d53-4755-9b40-4f907b6ea224\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn7fm" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.995718 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/10b563d1-9d53-4755-9b40-4f907b6ea224-tmpfs\") pod \"packageserver-d55dfcdfc-hn7fm\" (UID: \"10b563d1-9d53-4755-9b40-4f907b6ea224\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn7fm" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.996764 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ddcb9142-ff54-44b3-bb37-0e4b103c407e-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vrmbr\" (UID: \"ddcb9142-ff54-44b3-bb37-0e4b103c407e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vrmbr" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.998050 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03b7032d-fb47-4f5c-95b8-f69f58b65db7-config\") pod \"kube-controller-manager-operator-78b949d7b-6r57j\" (UID: \"03b7032d-fb47-4f5c-95b8-f69f58b65db7\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6r57j" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.998126 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e03572cc-cf26-4ed2-bb88-ae6d3150b904-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-r87pw\" (UID: \"e03572cc-cf26-4ed2-bb88-ae6d3150b904\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-r87pw" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.998487 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2680126d-1cf3-4cbd-a130-3d8d0070a394-secret-volume\") pod \"collect-profiles-29536815-fc4ph\" (UID: \"2680126d-1cf3-4cbd-a130-3d8d0070a394\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536815-fc4ph" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.999269 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/50c130e9-87b3-43aa-a620-8faefd1add54-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-sk82j\" (UID: \"50c130e9-87b3-43aa-a620-8faefd1add54\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sk82j" Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.999596 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7c1d7708-439d-4d51-ab68-0a2058480646-metrics-tls\") pod \"dns-default-vf5cg\" (UID: \"7c1d7708-439d-4d51-ab68-0a2058480646\") " pod="openshift-dns/dns-default-vf5cg" 
Feb 27 16:27:33 crc kubenswrapper[4751]: I0227 16:27:33.999816 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7c1d7708-439d-4d51-ab68-0a2058480646-config-volume\") pod \"dns-default-vf5cg\" (UID: \"7c1d7708-439d-4d51-ab68-0a2058480646\") " pod="openshift-dns/dns-default-vf5cg" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:33.999981 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/d474cfdd-550b-471d-aea1-6bc3f8532fa5-registration-dir\") pod \"csi-hostpathplugin-qs2km\" (UID: \"d474cfdd-550b-471d-aea1-6bc3f8532fa5\") " pod="hostpath-provisioner/csi-hostpathplugin-qs2km" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.000026 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/d474cfdd-550b-471d-aea1-6bc3f8532fa5-mountpoint-dir\") pod \"csi-hostpathplugin-qs2km\" (UID: \"d474cfdd-550b-471d-aea1-6bc3f8532fa5\") " pod="hostpath-provisioner/csi-hostpathplugin-qs2km" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.000201 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/4155e5dc-eb83-4d58-bb2b-554fcbda2e8c-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-rjqrq\" (UID: \"4155e5dc-eb83-4d58-bb2b-554fcbda2e8c\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rjqrq" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.000502 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a74a9206-b910-48c2-8448-29baf6140688-config\") pod \"authentication-operator-69f744f599-txmh7\" (UID: \"a74a9206-b910-48c2-8448-29baf6140688\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-txmh7" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.000731 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6efd7ce0-9b49-4c51-accf-3efcfc1188e6-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-mw4mn\" (UID: \"6efd7ce0-9b49-4c51-accf-3efcfc1188e6\") " pod="openshift-marketplace/marketplace-operator-79b997595-mw4mn" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.001553 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/7b0a19d1-3c2c-444a-8b89-b417e7324b78-certs\") pod \"machine-config-server-hfzxd\" (UID: \"7b0a19d1-3c2c-444a-8b89-b417e7324b78\") " pod="openshift-machine-config-operator/machine-config-server-hfzxd" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.001790 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/92ccd256-d5a2-4857-8acb-f11bc462ac9c-srv-cert\") pod \"catalog-operator-68c6474976-bfl2l\" (UID: \"92ccd256-d5a2-4857-8acb-f11bc462ac9c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bfl2l" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.002513 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ddcb9142-ff54-44b3-bb37-0e4b103c407e-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vrmbr\" (UID: 
\"ddcb9142-ff54-44b3-bb37-0e4b103c407e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vrmbr" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.002892 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95-proxy-tls\") pod \"machine-config-operator-74547568cd-297f4\" (UID: \"a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-297f4" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.004373 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/03b7032d-fb47-4f5c-95b8-f69f58b65db7-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-6r57j\" (UID: \"03b7032d-fb47-4f5c-95b8-f69f58b65db7\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6r57j" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.004619 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/10b563d1-9d53-4755-9b40-4f907b6ea224-apiservice-cert\") pod \"packageserver-d55dfcdfc-hn7fm\" (UID: \"10b563d1-9d53-4755-9b40-4f907b6ea224\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn7fm" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.004629 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/578d81ae-01aa-4cc7-bdee-de283490661d-profile-collector-cert\") pod \"olm-operator-6b444d44fb-k4pxk\" (UID: \"578d81ae-01aa-4cc7-bdee-de283490661d\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k4pxk" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.005522 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/526f8eee-e82b-4a1b-93ff-46a732856a7c-cert\") pod \"ingress-canary-4848q\" (UID: \"526f8eee-e82b-4a1b-93ff-46a732856a7c\") " pod="openshift-ingress-canary/ingress-canary-4848q" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.006353 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ml4k4\" (UniqueName: \"kubernetes.io/projected/f3483f04-316b-403b-9117-b744e8bc5c3f-kube-api-access-ml4k4\") pod \"cluster-samples-operator-665b6dd947-gjlwd\" (UID: \"f3483f04-316b-403b-9117-b744e8bc5c3f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gjlwd" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.008518 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/7b0a19d1-3c2c-444a-8b89-b417e7324b78-node-bootstrap-token\") pod \"machine-config-server-hfzxd\" (UID: \"7b0a19d1-3c2c-444a-8b89-b417e7324b78\") " pod="openshift-machine-config-operator/machine-config-server-hfzxd" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.009896 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.010349 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/ea0878fe-ac62-445d-87c1-248128612682-signing-key\") pod \"service-ca-9c57cc56f-j5z2q\" (UID: \"ea0878fe-ac62-445d-87c1-248128612682\") " pod="openshift-service-ca/service-ca-9c57cc56f-j5z2q" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.010695 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6efd7ce0-9b49-4c51-accf-3efcfc1188e6-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-mw4mn\" (UID: \"6efd7ce0-9b49-4c51-accf-3efcfc1188e6\") " pod="openshift-marketplace/marketplace-operator-79b997595-mw4mn" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.011051 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/89171c06-8a67-420c-bb2e-0608ceb22697-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-h46g5\" (UID: \"89171c06-8a67-420c-bb2e-0608ceb22697\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h46g5" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.013492 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/92ccd256-d5a2-4857-8acb-f11bc462ac9c-profile-collector-cert\") pod \"catalog-operator-68c6474976-bfl2l\" (UID: \"92ccd256-d5a2-4857-8acb-f11bc462ac9c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bfl2l" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.014394 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78xgx\" (UniqueName: \"kubernetes.io/projected/714d38ad-6a1e-4d9d-98ce-22a8582a6429-kube-api-access-78xgx\") pod \"etcd-operator-b45778765-8fhbk\" (UID: \"714d38ad-6a1e-4d9d-98ce-22a8582a6429\") " pod="openshift-etcd-operator/etcd-operator-b45778765-8fhbk" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.035289 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3ffa275a-62dc-46f6-ae70-34b5758d918e-bound-sa-token\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.036534 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.066098 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9cvth\" (UniqueName: \"kubernetes.io/projected/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-kube-api-access-9cvth\") pod \"oauth-openshift-558db77b4-dvwqp\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.070861 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.080460 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sdp4q\" (UniqueName: \"kubernetes.io/projected/f44df15c-1cd2-44eb-a4ac-efca1d06d3b3-kube-api-access-sdp4q\") pod \"cluster-image-registry-operator-dc59b4c8b-5svll\" (UID: \"f44df15c-1cd2-44eb-a4ac-efca1d06d3b3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5svll" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.089297 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:34 crc kubenswrapper[4751]: E0227 16:27:34.090124 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:34.590102233 +0000 UTC m=+216.737116700 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.094529 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f44df15c-1cd2-44eb-a4ac-efca1d06d3b3-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-5svll\" (UID: \"f44df15c-1cd2-44eb-a4ac-efca1d06d3b3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5svll" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.104878 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-fqng2"] Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.107289 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxfpx\" (UniqueName: \"kubernetes.io/projected/7c6775c9-9034-4be4-8002-201a25a35eab-kube-api-access-bxfpx\") pod \"route-controller-manager-6576b87f9c-rczbk\" (UID: \"7c6775c9-9034-4be4-8002-201a25a35eab\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.119987 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-5f8sr" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.128522 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ngdrh\" (UniqueName: \"kubernetes.io/projected/4b967e25-f884-4c9b-8307-cd4b669bbf76-kube-api-access-ngdrh\") pod \"machine-approver-56656f9798-wwdf6\" (UID: \"4b967e25-f884-4c9b-8307-cd4b669bbf76\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wwdf6" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.140750 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-zjprp"] Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.141007 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gjlwd" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.159850 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drxnk\" (UniqueName: \"kubernetes.io/projected/e7e88532-9a76-44df-b084-bfd9c96457c7-kube-api-access-drxnk\") pod \"console-operator-58897d9998-7gs6x\" (UID: \"e7e88532-9a76-44df-b084-bfd9c96457c7\") " pod="openshift-console-operator/console-operator-58897d9998-7gs6x" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.169724 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9592t\" (UniqueName: \"kubernetes.io/projected/ef47b0ab-908a-4d99-9517-32a5984070fb-kube-api-access-9592t\") pod \"openshift-config-operator-7777fb866f-2c2zk\" (UID: \"ef47b0ab-908a-4d99-9517-32a5984070fb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2c2zk" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.170331 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5svll" Feb 27 16:27:34 crc kubenswrapper[4751]: W0227 16:27:34.187507 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcd1d7d7d_297e_4d67_94c2_746c0295105f.slice/crio-ec6413a585d1791d53684d85b832fda6c6b0699fcc98a931656cdbeb8e6f31c7 WatchSource:0}: Error finding container ec6413a585d1791d53684d85b832fda6c6b0699fcc98a931656cdbeb8e6f31c7: Status 404 returned error can't find the container with id ec6413a585d1791d53684d85b832fda6c6b0699fcc98a931656cdbeb8e6f31c7 Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.191009 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.191027 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wblv\" (UniqueName: \"kubernetes.io/projected/ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3-kube-api-access-4wblv\") pod \"machine-api-operator-5694c8668f-7hctb\" (UID: \"ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-7hctb" Feb 27 16:27:34 crc kubenswrapper[4751]: E0227 16:27:34.191319 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:34.691302315 +0000 UTC m=+216.838316832 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.204757 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-8fhbk" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.212635 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4svm\" (UniqueName: \"kubernetes.io/projected/3ffa275a-62dc-46f6-ae70-34b5758d918e-kube-api-access-v4svm\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.217812 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-sblbf" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.218517 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-htn5q"] Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.231393 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2c2zk" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.235285 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xc9cw\" (UniqueName: \"kubernetes.io/projected/51bdc38f-9b69-437f-9a17-edb963fb01ab-kube-api-access-xc9cw\") pod \"router-default-5444994796-hsqjr\" (UID: \"51bdc38f-9b69-437f-9a17-edb963fb01ab\") " pod="openshift-ingress/router-default-5444994796-hsqjr" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.250744 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-knf9q"] Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.257455 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppbrc\" (UniqueName: \"kubernetes.io/projected/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-kube-api-access-ppbrc\") pod \"console-f9d7485db-hb87p\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.272025 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxwqn\" (UniqueName: \"kubernetes.io/projected/fbe81302-7306-4254-b54b-a91b118197a3-kube-api-access-pxwqn\") pod \"openshift-controller-manager-operator-756b6f6bc6-kjwh6\" (UID: \"fbe81302-7306-4254-b54b-a91b118197a3\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjwh6" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.292225 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:34 crc kubenswrapper[4751]: E0227 16:27:34.292828 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:34.792807116 +0000 UTC m=+216.939821563 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.311708 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ff68n\" (UniqueName: \"kubernetes.io/projected/92ccd256-d5a2-4857-8acb-f11bc462ac9c-kube-api-access-ff68n\") pod \"catalog-operator-68c6474976-bfl2l\" (UID: \"92ccd256-d5a2-4857-8acb-f11bc462ac9c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bfl2l" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.329196 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-dvwqp"] Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.338630 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7bgh\" (UniqueName: \"kubernetes.io/projected/11a526fe-64f1-4da8-a0e8-ed276ec069fb-kube-api-access-m7bgh\") pod \"auto-csr-approver-29536826-mxq7k\" (UID: \"11a526fe-64f1-4da8-a0e8-ed276ec069fb\") " pod="openshift-infra/auto-csr-approver-29536826-mxq7k" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.351750 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xf5zk\" (UniqueName: \"kubernetes.io/projected/59eee97a-55e1-4400-8f57-ab0781947114-kube-api-access-xf5zk\") pod \"openshift-apiserver-operator-796bbdcf4f-blvwj\" (UID: \"59eee97a-55e1-4400-8f57-ab0781947114\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-blvwj" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.351946 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-7hctb" Feb 27 16:27:34 crc kubenswrapper[4751]: W0227 16:27:34.358730 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd8b87a6b_b1cf_4f0f_b46e_9a580676f9d3.slice/crio-7c38d54f6c21245fb0ecce63d398f4e9fdf3aef16d5b5b434202dbd5ad189327 WatchSource:0}: Error finding container 7c38d54f6c21245fb0ecce63d398f4e9fdf3aef16d5b5b434202dbd5ad189327: Status 404 returned error can't find the container with id 7c38d54f6c21245fb0ecce63d398f4e9fdf3aef16d5b5b434202dbd5ad189327 Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.359091 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.375705 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-629gj\" (UniqueName: \"kubernetes.io/projected/526f8eee-e82b-4a1b-93ff-46a732856a7c-kube-api-access-629gj\") pod \"ingress-canary-4848q\" (UID: \"526f8eee-e82b-4a1b-93ff-46a732856a7c\") " pod="openshift-ingress-canary/ingress-canary-4848q" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.381556 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-5f8sr"] Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.393637 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:34 crc kubenswrapper[4751]: E0227 16:27:34.394612 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:34.894597954 +0000 UTC m=+217.041612401 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.395308 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjwh6" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.401659 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ddcb9142-ff54-44b3-bb37-0e4b103c407e-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vrmbr\" (UID: \"ddcb9142-ff54-44b3-bb37-0e4b103c407e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vrmbr" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.405151 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wwdf6" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.408775 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xjpr\" (UniqueName: \"kubernetes.io/projected/a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95-kube-api-access-2xjpr\") pod \"machine-config-operator-74547568cd-297f4\" (UID: \"a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-297f4" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.415946 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-4848q" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.434792 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-7gs6x" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.463052 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.463639 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmnjn\" (UniqueName: \"kubernetes.io/projected/d474cfdd-550b-471d-aea1-6bc3f8532fa5-kube-api-access-mmnjn\") pod \"csi-hostpathplugin-qs2km\" (UID: \"d474cfdd-550b-471d-aea1-6bc3f8532fa5\") " pod="hostpath-provisioner/csi-hostpathplugin-qs2km" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.472543 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mc52\" (UniqueName: \"kubernetes.io/projected/4155e5dc-eb83-4d58-bb2b-554fcbda2e8c-kube-api-access-7mc52\") pod \"control-plane-machine-set-operator-78cbb6b69f-rjqrq\" (UID: \"4155e5dc-eb83-4d58-bb2b-554fcbda2e8c\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rjqrq" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.474932 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdrx8\" (UniqueName: \"kubernetes.io/projected/daf0456f-25d3-4dee-8eac-49f0056aa251-kube-api-access-wdrx8\") pod \"kube-storage-version-migrator-operator-b67b599dd-p4n6h\" (UID: \"daf0456f-25d3-4dee-8eac-49f0056aa251\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-p4n6h" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.490371 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69xtc\" (UniqueName: \"kubernetes.io/projected/6efd7ce0-9b49-4c51-accf-3efcfc1188e6-kube-api-access-69xtc\") pod \"marketplace-operator-79b997595-mw4mn\" (UID: \"6efd7ce0-9b49-4c51-accf-3efcfc1188e6\") " pod="openshift-marketplace/marketplace-operator-79b997595-mw4mn" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.495577 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:34 crc kubenswrapper[4751]: E0227 16:27:34.496057 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:34.996044043 +0000 UTC m=+217.143058490 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.525226 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-hsqjr" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.525822 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bmcq7\" (UniqueName: \"kubernetes.io/projected/2680126d-1cf3-4cbd-a130-3d8d0070a394-kube-api-access-bmcq7\") pod \"collect-profiles-29536815-fc4ph\" (UID: \"2680126d-1cf3-4cbd-a130-3d8d0070a394\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536815-fc4ph" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.534543 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7vxx\" (UniqueName: \"kubernetes.io/projected/ea0878fe-ac62-445d-87c1-248128612682-kube-api-access-c7vxx\") pod \"service-ca-9c57cc56f-j5z2q\" (UID: \"ea0878fe-ac62-445d-87c1-248128612682\") " pod="openshift-service-ca/service-ca-9c57cc56f-j5z2q" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.551987 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rllc\" (UniqueName: \"kubernetes.io/projected/7c1d7708-439d-4d51-ab68-0a2058480646-kube-api-access-2rllc\") pod \"dns-default-vf5cg\" (UID: \"7c1d7708-439d-4d51-ab68-0a2058480646\") " pod="openshift-dns/dns-default-vf5cg" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.559943 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-blvwj" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.573492 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wbfvf\" (UniqueName: \"kubernetes.io/projected/520e0111-144a-404b-8dee-61fb546bc717-kube-api-access-wbfvf\") pod \"migrator-59844c95c7-vnzzq\" (UID: \"520e0111-144a-404b-8dee-61fb546bc717\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vnzzq" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.586824 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bfl2l" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.588208 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-sblbf"] Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.588240 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vrmbr" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.596097 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/03b7032d-fb47-4f5c-95b8-f69f58b65db7-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-6r57j\" (UID: \"03b7032d-fb47-4f5c-95b8-f69f58b65db7\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6r57j" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.597880 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:34 crc kubenswrapper[4751]: E0227 16:27:34.598165 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:35.098149919 +0000 UTC m=+217.245164376 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.615591 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536826-mxq7k" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.620634 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkt8f\" (UniqueName: \"kubernetes.io/projected/578d81ae-01aa-4cc7-bdee-de283490661d-kube-api-access-xkt8f\") pod \"olm-operator-6b444d44fb-k4pxk\" (UID: \"578d81ae-01aa-4cc7-bdee-de283490661d\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k4pxk" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.628282 4751 generic.go:334] "Generic (PLEG): container finished" podID="6490b89d-117d-4d8c-b625-b02d0404c882" containerID="f84d303270c192d60de463072e9cbe8beeaeb43cb103a282b35804a722a7939b" exitCode=0 Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.628461 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" event={"ID":"6490b89d-117d-4d8c-b625-b02d0404c882","Type":"ContainerDied","Data":"f84d303270c192d60de463072e9cbe8beeaeb43cb103a282b35804a722a7939b"} Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.629679 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536815-fc4ph" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.631931 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gjlwd"] Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.636040 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-sblbf" event={"ID":"14cd3902-6c39-409b-88f9-ddb6a23bc450","Type":"ContainerStarted","Data":"10770051a353540b48ac3334b0a9bff5caace855071f967ce0deaabd5fc5a091"} Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.637637 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wwdf6" event={"ID":"4b967e25-f884-4c9b-8307-cd4b669bbf76","Type":"ContainerStarted","Data":"5770916fb70c962db59c0c3b500d402e75f9dafe8fee5de7133be4f1e9a2bc8c"} Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.641685 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-knf9q" event={"ID":"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70","Type":"ContainerStarted","Data":"3939b8d00b4e1389f32d019d91b5418e28dffcec1f673bce2f22dbf371ec872f"} Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.641954 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7v5t\" (UniqueName: \"kubernetes.io/projected/a74a9206-b910-48c2-8448-29baf6140688-kube-api-access-r7v5t\") pod \"authentication-operator-69f744f599-txmh7\" (UID: \"a74a9206-b910-48c2-8448-29baf6140688\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-txmh7" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.644443 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" event={"ID":"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3","Type":"ContainerStarted","Data":"7c38d54f6c21245fb0ecce63d398f4e9fdf3aef16d5b5b434202dbd5ad189327"} Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.645765 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-mw4mn" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.649678 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-fqng2" event={"ID":"0e28f47c-623d-4724-bcec-80d38d902eba","Type":"ContainerStarted","Data":"ba4a1b50f9e737df97cd6ddb182d59458204d917077caa738887b4d0d9ac2625"} Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.650767 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j84rz\" (UniqueName: \"kubernetes.io/projected/50c130e9-87b3-43aa-a620-8faefd1add54-kube-api-access-j84rz\") pod \"machine-config-controller-84d6567774-sk82j\" (UID: \"50c130e9-87b3-43aa-a620-8faefd1add54\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sk82j" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.650941 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-5f8sr" event={"ID":"ee5f53d5-2c38-465d-ad33-d5c0c5eb3923","Type":"ContainerStarted","Data":"3b17f850bfeb56e6d23c60a23ce6248627dc0f8f18f56f87c9297b3f09fb51b2"} Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.652373 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zjprp" event={"ID":"cd1d7d7d-297e-4d67-94c2-746c0295105f","Type":"ContainerStarted","Data":"33212c20774ddd054add68e214be696bcf4514fb403b92ce095b6b31a2d7b85b"} Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.652433 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zjprp" event={"ID":"cd1d7d7d-297e-4d67-94c2-746c0295105f","Type":"ContainerStarted","Data":"ec6413a585d1791d53684d85b832fda6c6b0699fcc98a931656cdbeb8e6f31c7"} Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.653954 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" event={"ID":"8de82997-0daf-469d-ba5e-23fcaaa04614","Type":"ContainerStarted","Data":"b630a4a274603049de306a73a25b32e5680481bbb3d1b78236d650b0bbe4c5a0"} Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.653972 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" event={"ID":"8de82997-0daf-469d-ba5e-23fcaaa04614","Type":"ContainerStarted","Data":"4701c4676c18f2ff2b531b324c2ee0b7ef2bc8bccc9466ef74c96d76c80e9e35"} Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.657739 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.660366 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k4pxk" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.660798 4751 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-htn5q container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.660835 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" podUID="8de82997-0daf-469d-ba5e-23fcaaa04614" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.666660 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-j5z2q" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.672925 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-p4n6h" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.674143 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk"] Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.679334 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vnzzq" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.680324 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x75dp\" (UniqueName: \"kubernetes.io/projected/7b0a19d1-3c2c-444a-8b89-b417e7324b78-kube-api-access-x75dp\") pod \"machine-config-server-hfzxd\" (UID: \"7b0a19d1-3c2c-444a-8b89-b417e7324b78\") " pod="openshift-machine-config-operator/machine-config-server-hfzxd" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.683774 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5svll"] Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.686364 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sk82j" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.694591 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thght\" (UniqueName: \"kubernetes.io/projected/ebd19e25-96f6-454f-9472-d504d15a4821-kube-api-access-thght\") pod \"service-ca-operator-777779d784-jxspb\" (UID: \"ebd19e25-96f6-454f-9472-d504d15a4821\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jxspb" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.694594 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-8fhbk"] Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.699540 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.699663 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rjqrq" Feb 27 16:27:34 crc kubenswrapper[4751]: E0227 16:27:34.700480 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:35.200457191 +0000 UTC m=+217.347471688 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.708876 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-297f4" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.711541 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62rln\" (UniqueName: \"kubernetes.io/projected/10b563d1-9d53-4755-9b40-4f907b6ea224-kube-api-access-62rln\") pod \"packageserver-d55dfcdfc-hn7fm\" (UID: \"10b563d1-9d53-4755-9b40-4f907b6ea224\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn7fm" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.719079 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-qs2km" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.733922 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzp6x\" (UniqueName: \"kubernetes.io/projected/89171c06-8a67-420c-bb2e-0608ceb22697-kube-api-access-tzp6x\") pod \"package-server-manager-789f6589d5-h46g5\" (UID: \"89171c06-8a67-420c-bb2e-0608ceb22697\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h46g5" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.735725 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-vf5cg" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.777849 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89hmp\" (UniqueName: \"kubernetes.io/projected/e03572cc-cf26-4ed2-bb88-ae6d3150b904-kube-api-access-89hmp\") pod \"multus-admission-controller-857f4d67dd-r87pw\" (UID: \"e03572cc-cf26-4ed2-bb88-ae6d3150b904\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-r87pw" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.781804 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-4848q"] Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.792529 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-2c2zk"] Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.801103 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:34 crc kubenswrapper[4751]: E0227 16:27:34.802658 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:35.30264656 +0000 UTC m=+217.449661007 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.838503 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-txmh7" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.875993 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-r87pw" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.879666 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6r57j" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.902308 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:34 crc kubenswrapper[4751]: E0227 16:27:34.902453 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:35.402435785 +0000 UTC m=+217.549450232 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.902599 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:34 crc kubenswrapper[4751]: E0227 16:27:34.902977 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:35.402965519 +0000 UTC m=+217.549979966 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.922026 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jxspb" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.937691 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h46g5" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.952321 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-hfzxd" Feb 27 16:27:34 crc kubenswrapper[4751]: I0227 16:27:34.992611 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn7fm" Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.010674 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:35 crc kubenswrapper[4751]: E0227 16:27:35.011135 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:35.511116394 +0000 UTC m=+217.658130841 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.089467 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-7hctb"] Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.093541 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjwh6"] Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.112057 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:35 crc kubenswrapper[4751]: E0227 16:27:35.112463 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:35.61245003 +0000 UTC m=+217.759464477 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.121690 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-7gs6x"] Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.134372 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bfl2l"] Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.164253 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-hb87p"] Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.214037 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:35 crc kubenswrapper[4751]: E0227 16:27:35.214645 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:35.714618618 +0000 UTC m=+217.861633065 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.226899 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-qs2km"] Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.294301 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-blvwj"] Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.316500 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:35 crc kubenswrapper[4751]: E0227 16:27:35.316896 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:35.816882779 +0000 UTC m=+217.963897226 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:35 crc kubenswrapper[4751]: W0227 16:27:35.332971 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podee39d7ed_b569_4c34_8c19_a5f386c85b5c.slice/crio-bb0394270f0749a331489903ff13a2df3a100679b1bb38008c539dd6b8118877 WatchSource:0}: Error finding container bb0394270f0749a331489903ff13a2df3a100679b1bb38008c539dd6b8118877: Status 404 returned error can't find the container with id bb0394270f0749a331489903ff13a2df3a100679b1bb38008c539dd6b8118877 Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.339858 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536815-fc4ph"] Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.347022 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vrmbr"] Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.372047 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536826-mxq7k"] Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.373329 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-j5z2q"] Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.409524 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mw4mn"] Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.417797 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:35 crc kubenswrapper[4751]: E0227 16:27:35.418087 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:35.918062831 +0000 UTC m=+218.065077278 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.429514 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k4pxk"] Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.484602 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-sk82j"] Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.520320 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:35 crc kubenswrapper[4751]: E0227 16:27:35.520689 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:36.020673521 +0000 UTC m=+218.167687968 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.534614 4751 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.579150 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-p4n6h"] Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.620865 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:35 crc kubenswrapper[4751]: E0227 16:27:35.621603 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:36.121587825 +0000 UTC m=+218.268602272 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.670878 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bfl2l" event={"ID":"92ccd256-d5a2-4857-8acb-f11bc462ac9c","Type":"ContainerStarted","Data":"0c94d021022d133152e7cd1989e8c4429affde666067fa3a507ec7710cff4128"} Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.719873 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gjlwd" event={"ID":"f3483f04-316b-403b-9117-b744e8bc5c3f","Type":"ContainerStarted","Data":"5332fedc9f33eb25833a4bda18b4e0e395601ff056915aae76f3006bb08cd7cb"} Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.719927 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gjlwd" event={"ID":"f3483f04-316b-403b-9117-b744e8bc5c3f","Type":"ContainerStarted","Data":"d956a56b3564dd652a47f30dc5f5c613e22f5d91bfec8e374c59150d0ba3ded2"} Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.722567 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:35 crc kubenswrapper[4751]: E0227 16:27:35.723029 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:36.223005573 +0000 UTC m=+218.370020020 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.730666 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zjprp" event={"ID":"cd1d7d7d-297e-4d67-94c2-746c0295105f","Type":"ContainerStarted","Data":"ede9c8ca7452fbe6bf738273fc97935c634540c130d7f3e59964efe24aa9d65a"} Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.733647 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-blvwj" event={"ID":"59eee97a-55e1-4400-8f57-ab0781947114","Type":"ContainerStarted","Data":"35abe18971ba89a878287e73d27e003807ffa29b57688cebb1882f5cf27fc969"} Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.756083 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-sblbf" event={"ID":"14cd3902-6c39-409b-88f9-ddb6a23bc450","Type":"ContainerStarted","Data":"519f19cae1c965eb9734c169db86dad644fc6dc6a785bf42ce440d07db7c0af6"} Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.766166 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rjqrq"] Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.774485 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-vnzzq"] Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.780477 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-297f4"] Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.818176 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-mw4mn" event={"ID":"6efd7ce0-9b49-4c51-accf-3efcfc1188e6","Type":"ContainerStarted","Data":"64ca60642130ddd2d989b28672936ee38d040ddfdf2e67c1d1652207a1914332"} Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.824320 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:35 crc kubenswrapper[4751]: E0227 16:27:35.824963 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:36.324937585 +0000 UTC m=+218.471952032 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.825290 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:35 crc kubenswrapper[4751]: E0227 16:27:35.827737 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:36.327716728 +0000 UTC m=+218.474731405 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.830890 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-8fhbk" event={"ID":"714d38ad-6a1e-4d9d-98ce-22a8582a6429","Type":"ContainerStarted","Data":"4304f0122294bb24f6f48f10c58957af4dcb3e15c3c59b4c0e890b18df197a6b"} Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.839293 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-j5z2q" event={"ID":"ea0878fe-ac62-445d-87c1-248128612682","Type":"ContainerStarted","Data":"d2ac1df05fb7bfc5ad1f6dc405a5056917b13dd706fb219411c9ed4a4c166a91"} Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.845286 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-7gs6x" event={"ID":"e7e88532-9a76-44df-b084-bfd9c96457c7","Type":"ContainerStarted","Data":"f5653009867aef069d648459e2f76c73ec1d55a9c13e84c850f34034ab7c71dc"} Feb 27 16:27:35 crc kubenswrapper[4751]: W0227 16:27:35.851203 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4155e5dc_eb83_4d58_bb2b_554fcbda2e8c.slice/crio-d9fcb8b5febf80bd0576afc7443f67ef30e49c7908b507485f72edaf5ef5db14 WatchSource:0}: Error finding container d9fcb8b5febf80bd0576afc7443f67ef30e49c7908b507485f72edaf5ef5db14: Status 404 returned error can't find the container with id d9fcb8b5febf80bd0576afc7443f67ef30e49c7908b507485f72edaf5ef5db14 Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.859159 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sk82j" 
event={"ID":"50c130e9-87b3-43aa-a620-8faefd1add54","Type":"ContainerStarted","Data":"36720f2187243788d1c8da8a7e708acead26b113d7a5e9c32ef2561defe26902"} Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.917078 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-vf5cg"] Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.929931 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-7hctb" event={"ID":"ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3","Type":"ContainerStarted","Data":"0c8feeeb630d4d1bac463f691d334a54c2333320941c189ee6533845ba44243a"} Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.930184 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:35 crc kubenswrapper[4751]: E0227 16:27:35.930336 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:36.430314907 +0000 UTC m=+218.577329424 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.930649 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:35 crc kubenswrapper[4751]: E0227 16:27:35.930964 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:36.430956944 +0000 UTC m=+218.577971391 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.935646 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-4848q" event={"ID":"526f8eee-e82b-4a1b-93ff-46a732856a7c","Type":"ContainerStarted","Data":"ea4db6690599560f79c88d5012af2c43483a02df0899d2622de6b650504c26a6"} Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.958032 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-fqng2" event={"ID":"0e28f47c-623d-4724-bcec-80d38d902eba","Type":"ContainerStarted","Data":"b33011e90b3836834c4eeb2b377f98beef2919653a4493acd477bcacb8ddb830"} Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.964373 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5svll" event={"ID":"f44df15c-1cd2-44eb-a4ac-efca1d06d3b3","Type":"ContainerStarted","Data":"feba16e2c1782b77876a52c76ffe9b9a160b4c401578aa9df430cd6bb8839e1b"} Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.978314 4751 generic.go:334] "Generic (PLEG): container finished" podID="c3fb94cc-a0da-4ecb-b0ba-a4666e264e70" containerID="c5083c6a6dcb74205151a5b82cdacd98ff07ba7e62f5f8552e53540ac225412c" exitCode=0 Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.978688 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-knf9q" event={"ID":"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70","Type":"ContainerDied","Data":"c5083c6a6dcb74205151a5b82cdacd98ff07ba7e62f5f8552e53540ac225412c"} Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.984150 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vrmbr" event={"ID":"ddcb9142-ff54-44b3-bb37-0e4b103c407e","Type":"ContainerStarted","Data":"6291c8a159269189614b73c8f38a36ca4d6ae7cc1ddcdb4447c928a38555fa05"} Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.986608 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" event={"ID":"7c6775c9-9034-4be4-8002-201a25a35eab","Type":"ContainerStarted","Data":"bfd01c651815c23c54b27d9531b599311c5940a0916292d01b048782ae249664"} Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.987696 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.990442 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjwh6" event={"ID":"fbe81302-7306-4254-b54b-a91b118197a3","Type":"ContainerStarted","Data":"760fd88921af3c45891b51f9df8160ad3654a6d709d102f3d1c218dc68de66b2"} Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.998444 4751 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-rczbk container/route-controller-manager 
namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.998503 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" podUID="7c6775c9-9034-4be4-8002-201a25a35eab" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" Feb 27 16:27:35 crc kubenswrapper[4751]: I0227 16:27:35.998774 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-hfzxd" event={"ID":"7b0a19d1-3c2c-444a-8b89-b417e7324b78","Type":"ContainerStarted","Data":"03e76daa3e06a9f5921e42dee6d35355777db3ea820ac5f3038aeb51806a4991"} Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.000608 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-hb87p" event={"ID":"ee39d7ed-b569-4c34-8c19-a5f386c85b5c","Type":"ContainerStarted","Data":"bb0394270f0749a331489903ff13a2df3a100679b1bb38008c539dd6b8118877"} Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.009859 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536815-fc4ph" event={"ID":"2680126d-1cf3-4cbd-a130-3d8d0070a394","Type":"ContainerStarted","Data":"981587c0e32f44253dd1949a9fdb4ccc4b9af02fcde65721f31b0bcce65393cc"} Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.016047 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2c2zk" event={"ID":"ef47b0ab-908a-4d99-9517-32a5984070fb","Type":"ContainerStarted","Data":"a93d960f689e5dc29d987c0e999f8cf5fd2ff98de007a209cb4c8165b50c8777"} Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.031098 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-qs2km" event={"ID":"d474cfdd-550b-471d-aea1-6bc3f8532fa5","Type":"ContainerStarted","Data":"9b2a6a68906934fda9b45d3be715ce112765d45703a120fbee598ff0e903b055"} Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.034139 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536826-mxq7k" event={"ID":"11a526fe-64f1-4da8-a0e8-ed276ec069fb","Type":"ContainerStarted","Data":"0eb57bbe98032acdbd5c77d394759db5911f655ff6cf9dcc4de3c5d6ee154733"} Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.035918 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:36 crc kubenswrapper[4751]: E0227 16:27:36.036223 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:36.536202243 +0000 UTC m=+218.683216690 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.046646 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-r87pw"] Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.050575 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn7fm"] Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.062293 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-5f8sr" event={"ID":"ee5f53d5-2c38-465d-ad33-d5c0c5eb3923","Type":"ContainerStarted","Data":"7e7206d58f9fac48b87d199556feca545d299d29fa40f8326a52ea494929fcce"} Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.071742 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wwdf6" event={"ID":"4b967e25-f884-4c9b-8307-cd4b669bbf76","Type":"ContainerStarted","Data":"f2edd473054e419ed4847e72c6712ee7a7525485f55870db55c0bd6b9fec0ec4"} Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.074254 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k4pxk" event={"ID":"578d81ae-01aa-4cc7-bdee-de283490661d","Type":"ContainerStarted","Data":"66b2180187099e6ae014dc66a2247ec37ffef35491c308f93894c460c894c392"} Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.078953 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" event={"ID":"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3","Type":"ContainerStarted","Data":"7951bf3551c34edcc37720b2a3862a129ac15eaad78f62464abbcc3807c4212e"} Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.081142 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.086556 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-hsqjr" event={"ID":"51bdc38f-9b69-437f-9a17-edb963fb01ab","Type":"ContainerStarted","Data":"dcab023be2cfa48ee5ac4ff62df60a03bdd2e1533f3c3191f96dec5ba46667d7"} Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.086638 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-hsqjr" event={"ID":"51bdc38f-9b69-437f-9a17-edb963fb01ab","Type":"ContainerStarted","Data":"c948d35610aa6975a23820f7bfd77392edb80c5e65cc1c035617d27b16637ba0"} Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.093284 4751 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-dvwqp container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.19:6443/healthz\": dial tcp 10.217.0.19:6443: connect: connection refused" start-of-body= Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.093390 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" 
podUID="d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.19:6443/healthz\": dial tcp 10.217.0.19:6443: connect: connection refused" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.093777 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.106469 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h46g5"] Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.122194 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-jxspb"] Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.137760 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-txmh7"] Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.144523 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:36 crc kubenswrapper[4751]: E0227 16:27:36.146594 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:36.646571566 +0000 UTC m=+218.793586013 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:36 crc kubenswrapper[4751]: W0227 16:27:36.186665 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod89171c06_8a67_420c_bb2e_0608ceb22697.slice/crio-d4a14bb04a02a30a6f0a5eaf3ab1ba31a30f703ae40ee726c2be32ef72cfb5f9 WatchSource:0}: Error finding container d4a14bb04a02a30a6f0a5eaf3ab1ba31a30f703ae40ee726c2be32ef72cfb5f9: Status 404 returned error can't find the container with id d4a14bb04a02a30a6f0a5eaf3ab1ba31a30f703ae40ee726c2be32ef72cfb5f9 Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.245108 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6r57j"] Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.245655 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:36 crc kubenswrapper[4751]: E0227 16:27:36.262178 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:36.762152675 +0000 UTC m=+218.909167122 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.365187 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:36 crc kubenswrapper[4751]: E0227 16:27:36.365603 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:36.865591457 +0000 UTC m=+219.012605894 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.387672 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" podStartSLOduration=175.387656975 podStartE2EDuration="2m55.387656975s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:36.356863918 +0000 UTC m=+218.503878365" watchObservedRunningTime="2026-02-27 16:27:36.387656975 +0000 UTC m=+218.534671422" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.388824 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-fqng2" podStartSLOduration=175.388819866 podStartE2EDuration="2m55.388819866s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:36.387587843 +0000 UTC m=+218.534602280" watchObservedRunningTime="2026-02-27 16:27:36.388819866 +0000 UTC m=+218.535834313" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.424426 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" podStartSLOduration=175.424394528 podStartE2EDuration="2m55.424394528s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:36.422970591 +0000 UTC m=+218.569985038" watchObservedRunningTime="2026-02-27 16:27:36.424394528 +0000 UTC m=+218.571408975" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.466034 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.466323 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.466350 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:27:36 crc 
kubenswrapper[4751]: I0227 16:27:36.466375 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:27:36 crc kubenswrapper[4751]: E0227 16:27:36.472848 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:36.972816407 +0000 UTC m=+219.119830854 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.473577 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.485899 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.490416 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.535550 4751 patch_prober.go:28] interesting pod/router-default-5444994796-hsqjr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 27 16:27:36 crc kubenswrapper[4751]: [-]has-synced failed: reason withheld Feb 27 16:27:36 crc kubenswrapper[4751]: [+]process-running ok Feb 27 16:27:36 crc kubenswrapper[4751]: healthz check failed Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.535596 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsqjr" podUID="51bdc38f-9b69-437f-9a17-edb963fb01ab" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.537649 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-ingress/router-default-5444994796-hsqjr" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.538764 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.543638 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-5f8sr" podStartSLOduration=175.543622363 podStartE2EDuration="2m55.543622363s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:36.53855432 +0000 UTC m=+218.685568767" watchObservedRunningTime="2026-02-27 16:27:36.543622363 +0000 UTC m=+218.690636810" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.567501 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.567667 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.567704 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.567778 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs\") pod \"network-metrics-daemon-4bnbv\" (UID: \"7da183a7-dcda-4e22-b135-b1ef0d593811\") " pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:27:36 crc kubenswrapper[4751]: E0227 16:27:36.568034 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:37.068021133 +0000 UTC m=+219.215035570 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.590089 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7da183a7-dcda-4e22-b135-b1ef0d593811-metrics-certs\") pod \"network-metrics-daemon-4bnbv\" (UID: \"7da183a7-dcda-4e22-b135-b1ef0d593811\") " pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.622960 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.641158 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4bnbv" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.657619 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5svll" podStartSLOduration=175.657602321 podStartE2EDuration="2m55.657602321s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:36.651792939 +0000 UTC m=+218.798807376" watchObservedRunningTime="2026-02-27 16:27:36.657602321 +0000 UTC m=+218.804616768" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.671984 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:36 crc kubenswrapper[4751]: E0227 16:27:36.672164 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:37.172138392 +0000 UTC m=+219.319152839 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.672297 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:36 crc kubenswrapper[4751]: E0227 16:27:36.672694 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:37.172687406 +0000 UTC m=+219.319701853 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.686021 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zjprp" podStartSLOduration=175.686003035 podStartE2EDuration="2m55.686003035s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:36.682787661 +0000 UTC m=+218.829802108" watchObservedRunningTime="2026-02-27 16:27:36.686003035 +0000 UTC m=+218.833017482" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.742730 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-sblbf" podStartSLOduration=174.742697901 podStartE2EDuration="2m54.742697901s" podCreationTimestamp="2026-02-27 16:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:36.711057122 +0000 UTC m=+218.858071569" watchObservedRunningTime="2026-02-27 16:27:36.742697901 +0000 UTC m=+218.889712348" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.746679 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-hsqjr" podStartSLOduration=175.746666545 podStartE2EDuration="2m55.746666545s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:36.740320059 +0000 UTC m=+218.887334506" watchObservedRunningTime="2026-02-27 16:27:36.746666545 +0000 UTC m=+218.893680992" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 
16:27:36.774155 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:36 crc kubenswrapper[4751]: E0227 16:27:36.774454 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:37.274439783 +0000 UTC m=+219.421454230 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.803265 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjwh6" podStartSLOduration=175.803244308 podStartE2EDuration="2m55.803244308s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:36.767312737 +0000 UTC m=+218.914327184" watchObservedRunningTime="2026-02-27 16:27:36.803244308 +0000 UTC m=+218.950258755" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.803462 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" podStartSLOduration=174.803457084 podStartE2EDuration="2m54.803457084s" podCreationTimestamp="2026-02-27 16:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:36.801979725 +0000 UTC m=+218.948994172" watchObservedRunningTime="2026-02-27 16:27:36.803457084 +0000 UTC m=+218.950471531" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.847614 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.877045 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:36 crc kubenswrapper[4751]: E0227 16:27:36.877474 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:37.377389072 +0000 UTC m=+219.524403519 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.979170 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:36 crc kubenswrapper[4751]: E0227 16:27:36.979438 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:37.479396126 +0000 UTC m=+219.626410563 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:36 crc kubenswrapper[4751]: I0227 16:27:36.979659 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:36 crc kubenswrapper[4751]: E0227 16:27:36.979993 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:37.479979531 +0000 UTC m=+219.626993978 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.080574 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:37 crc kubenswrapper[4751]: E0227 16:27:37.080888 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:37.580872836 +0000 UTC m=+219.727887283 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.120584 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-txmh7" event={"ID":"a74a9206-b910-48c2-8448-29baf6140688","Type":"ContainerStarted","Data":"0d7bfe268ded4bc97bcfb866e185eb367c72b63949e05629e2013483a5468b31"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.127890 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-blvwj" event={"ID":"59eee97a-55e1-4400-8f57-ab0781947114","Type":"ContainerStarted","Data":"d5d9c588cd02ca7f96418f23887e5d1821adbbab4c5c7e6213411fe861e36928"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.139789 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-fqng2" event={"ID":"0e28f47c-623d-4724-bcec-80d38d902eba","Type":"ContainerStarted","Data":"d3cd3a8a7fbcaa578baee52e0e46799c14e0cb7fa7639421c8c2277bce52edcf"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.153691 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-blvwj" podStartSLOduration=176.153669384 podStartE2EDuration="2m56.153669384s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:37.148306643 +0000 UTC m=+219.295321090" watchObservedRunningTime="2026-02-27 16:27:37.153669384 +0000 UTC m=+219.300683831" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.163574 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-r87pw" 
event={"ID":"e03572cc-cf26-4ed2-bb88-ae6d3150b904","Type":"ContainerStarted","Data":"e8493cc13a9ae4ec426403a63321c06bbddcfedd4bf444659ccd2ddda78dde7d"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.182637 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:37 crc kubenswrapper[4751]: E0227 16:27:37.183713 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:37.68366821 +0000 UTC m=+219.830682657 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.200873 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5svll" event={"ID":"f44df15c-1cd2-44eb-a4ac-efca1d06d3b3","Type":"ContainerStarted","Data":"db4c14c73ebf62bf7e4238dbb02a04895297bb6ce1047b392114bbeccf8cb934"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.232896 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-4848q" event={"ID":"526f8eee-e82b-4a1b-93ff-46a732856a7c","Type":"ContainerStarted","Data":"88d546e8762005e0182769c3ea818e3b0fe48997b6880cb3e1e2322fc610b172"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.259850 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bfl2l" event={"ID":"92ccd256-d5a2-4857-8acb-f11bc462ac9c","Type":"ContainerStarted","Data":"f428310483055abf6400cf3756146ba743b2a6df299e29cb0743d772cc72d934"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.260871 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bfl2l" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.265547 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-4848q" podStartSLOduration=6.265529756 podStartE2EDuration="6.265529756s" podCreationTimestamp="2026-02-27 16:27:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:37.264676583 +0000 UTC m=+219.411691020" watchObservedRunningTime="2026-02-27 16:27:37.265529756 +0000 UTC m=+219.412544203" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.265651 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h46g5" 
event={"ID":"89171c06-8a67-420c-bb2e-0608ceb22697","Type":"ContainerStarted","Data":"d4a14bb04a02a30a6f0a5eaf3ab1ba31a30f703ae40ee726c2be32ef72cfb5f9"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.267484 4751 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-bfl2l container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.267519 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bfl2l" podUID="92ccd256-d5a2-4857-8acb-f11bc462ac9c" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.277340 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-mw4mn" event={"ID":"6efd7ce0-9b49-4c51-accf-3efcfc1188e6","Type":"ContainerStarted","Data":"ac44ab100ee04292ad4c40faef41bf61ca615c7ced92e5e7c2506100e5c38686"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.277441 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-mw4mn" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.284053 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:37 crc kubenswrapper[4751]: E0227 16:27:37.286185 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:37.786164717 +0000 UTC m=+219.933179164 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.301674 4751 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-mw4mn container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" start-of-body= Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.301742 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-mw4mn" podUID="6efd7ce0-9b49-4c51-accf-3efcfc1188e6" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.307780 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rjqrq" event={"ID":"4155e5dc-eb83-4d58-bb2b-554fcbda2e8c","Type":"ContainerStarted","Data":"5de65d9b85a3cdaf673cadb4e941f947d6183f97b3740cf091ca9d93cd34ec3f"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.307855 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rjqrq" event={"ID":"4155e5dc-eb83-4d58-bb2b-554fcbda2e8c","Type":"ContainerStarted","Data":"d9fcb8b5febf80bd0576afc7443f67ef30e49c7908b507485f72edaf5ef5db14"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.328551 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bfl2l" podStartSLOduration=175.328525247 podStartE2EDuration="2m55.328525247s" podCreationTimestamp="2026-02-27 16:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:37.317894718 +0000 UTC m=+219.464909165" watchObservedRunningTime="2026-02-27 16:27:37.328525247 +0000 UTC m=+219.475539694" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.374698 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536815-fc4ph" event={"ID":"2680126d-1cf3-4cbd-a130-3d8d0070a394","Type":"ContainerStarted","Data":"38f9d3e3e1e837826613fae5a0ad31b0cbb1d346cd35d3364c567c6b64b60399"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.391464 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:37 crc kubenswrapper[4751]: E0227 16:27:37.394114 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-02-27 16:27:37.894095606 +0000 UTC m=+220.041110173 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.404631 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-vf5cg" event={"ID":"7c1d7708-439d-4d51-ab68-0a2058480646","Type":"ContainerStarted","Data":"4f81f5265ecbcf6798bde0b6cbb901c372933d3beaf9e238ad74663e58477139"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.404676 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-vf5cg" event={"ID":"7c1d7708-439d-4d51-ab68-0a2058480646","Type":"ContainerStarted","Data":"999c17cdc4e3a790a5544bdd299a4989c21ae2f8cfaee1446a3390b4f0434d16"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.407891 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjwh6" event={"ID":"fbe81302-7306-4254-b54b-a91b118197a3","Type":"ContainerStarted","Data":"bb5426b892f073bb4ffb8180cc72935959fbf79c591fff89048bd7647d008494"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.410433 4751 generic.go:334] "Generic (PLEG): container finished" podID="ef47b0ab-908a-4d99-9517-32a5984070fb" containerID="ced3dd66efa31cb99edd41fd5b01627b6128fa8b1b7ea84ceaf3453ec84bf620" exitCode=0 Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.410476 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2c2zk" event={"ID":"ef47b0ab-908a-4d99-9517-32a5984070fb","Type":"ContainerDied","Data":"ced3dd66efa31cb99edd41fd5b01627b6128fa8b1b7ea84ceaf3453ec84bf620"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.451160 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn7fm" event={"ID":"10b563d1-9d53-4755-9b40-4f907b6ea224","Type":"ContainerStarted","Data":"42f1606d9dcf96037646d39adf3f9b0234e6acc6fbf7dddad0ff7e700ef62391"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.451600 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-mw4mn" podStartSLOduration=175.451584783 podStartE2EDuration="2m55.451584783s" podCreationTimestamp="2026-02-27 16:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:37.394287411 +0000 UTC m=+219.541301858" watchObservedRunningTime="2026-02-27 16:27:37.451584783 +0000 UTC m=+219.598599230" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.452131 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn7fm" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.452569 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rjqrq" podStartSLOduration=175.452563338 
podStartE2EDuration="2m55.452563338s" podCreationTimestamp="2026-02-27 16:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:37.425516599 +0000 UTC m=+219.572531116" watchObservedRunningTime="2026-02-27 16:27:37.452563338 +0000 UTC m=+219.599577775" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.471924 4751 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-hn7fm container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" start-of-body= Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.471968 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn7fm" podUID="10b563d1-9d53-4755-9b40-4f907b6ea224" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.474831 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29536815-fc4ph" podStartSLOduration=176.474821922 podStartE2EDuration="2m56.474821922s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:37.472610974 +0000 UTC m=+219.619625421" watchObservedRunningTime="2026-02-27 16:27:37.474821922 +0000 UTC m=+219.621836379" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.478191 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-hb87p" event={"ID":"ee39d7ed-b569-4c34-8c19-a5f386c85b5c","Type":"ContainerStarted","Data":"cc5388dd2c32ec38ef35a6a99977ea25c61827c028d0a1342a5aec8ae655c411"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.492561 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:37 crc kubenswrapper[4751]: E0227 16:27:37.493255 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:37.993235604 +0000 UTC m=+220.140250051 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.575537 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" event={"ID":"6490b89d-117d-4d8c-b625-b02d0404c882","Type":"ContainerStarted","Data":"7b1a081e79ab7512084f7c28d715defb936975ed5d552121fc6d1c5e706c42cd"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.581977 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jxspb" event={"ID":"ebd19e25-96f6-454f-9472-d504d15a4821","Type":"ContainerStarted","Data":"992fe6dbb94c44b4294ee9af87dca66a0f3b888d1a802342931d6c937ea82441"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.590957 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-7gs6x" event={"ID":"e7e88532-9a76-44df-b084-bfd9c96457c7","Type":"ContainerStarted","Data":"968951f4d54f05d076dfabb071d49d174140c5fa0e7b12da126d4767bdbf72c6"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.592922 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-7gs6x" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.593902 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:37 crc kubenswrapper[4751]: E0227 16:27:37.596815 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:38.096799929 +0000 UTC m=+220.243814386 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.599488 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn7fm" podStartSLOduration=175.599473029 podStartE2EDuration="2m55.599473029s" podCreationTimestamp="2026-02-27 16:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:37.597831806 +0000 UTC m=+219.744846283" watchObservedRunningTime="2026-02-27 16:27:37.599473029 +0000 UTC m=+219.746487476" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.600260 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-hb87p" podStartSLOduration=176.60025468 podStartE2EDuration="2m56.60025468s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:37.559980834 +0000 UTC m=+219.706995271" watchObservedRunningTime="2026-02-27 16:27:37.60025468 +0000 UTC m=+219.747269127" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.620967 4751 patch_prober.go:28] interesting pod/console-operator-58897d9998-7gs6x container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/readyz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.621011 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-7gs6x" podUID="e7e88532-9a76-44df-b084-bfd9c96457c7" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/readyz\": dial tcp 10.217.0.20:8443: connect: connection refused" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.621990 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-j5z2q" event={"ID":"ea0878fe-ac62-445d-87c1-248128612682","Type":"ContainerStarted","Data":"8fdbc1d218a5435c5be837f650505652b4dddf0e3cc30ade126e76284cf57c36"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.632279 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vnzzq" event={"ID":"520e0111-144a-404b-8dee-61fb546bc717","Type":"ContainerStarted","Data":"05dc040e8171caeaa94e29485226a7040eadc08a7bfdd06ebaf90973f9e55807"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.632334 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vnzzq" event={"ID":"520e0111-144a-404b-8dee-61fb546bc717","Type":"ContainerStarted","Data":"67c2c981a70cc9819ab4e74607d51db727176f4baf8c3bf7eda7319b04c7e814"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.661921 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" podStartSLOduration=175.661906856 podStartE2EDuration="2m55.661906856s" podCreationTimestamp="2026-02-27 16:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:37.629789324 +0000 UTC m=+219.776803771" watchObservedRunningTime="2026-02-27 16:27:37.661906856 +0000 UTC m=+219.808921303" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.666256 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vrmbr" event={"ID":"ddcb9142-ff54-44b3-bb37-0e4b103c407e","Type":"ContainerStarted","Data":"19c99b9503c8a16e58fe1d589d498cbc870919c6ae17b28b363dc2e17ffe2be8"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.681977 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sk82j" event={"ID":"50c130e9-87b3-43aa-a620-8faefd1add54","Type":"ContainerStarted","Data":"3c7038c22dfa6abfded69c371803dcd7ce7e0c4d90ddaee99c4bc2ffa69a9c21"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.697932 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:37 crc kubenswrapper[4751]: E0227 16:27:37.698907 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:38.198891545 +0000 UTC m=+220.345905992 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.710585 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-j5z2q" podStartSLOduration=175.710569291 podStartE2EDuration="2m55.710569291s" podCreationTimestamp="2026-02-27 16:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:37.708033085 +0000 UTC m=+219.855047532" watchObservedRunningTime="2026-02-27 16:27:37.710569291 +0000 UTC m=+219.857583738" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.715642 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-7gs6x" podStartSLOduration=176.715616784 podStartE2EDuration="2m56.715616784s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:37.663128558 +0000 UTC m=+219.810143015" watchObservedRunningTime="2026-02-27 16:27:37.715616784 +0000 UTC m=+219.862631231" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.725451 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-7hctb" event={"ID":"ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3","Type":"ContainerStarted","Data":"08ecd476b1673767cb59270915513b74f10f3f25bb3cf37d470dfbcde6489476"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.746667 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6r57j" event={"ID":"03b7032d-fb47-4f5c-95b8-f69f58b65db7","Type":"ContainerStarted","Data":"9a9bbd93130046129464fe39e700488237dcaf7b0c9e1dc4f2876a38d52abbb9"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.776416 4751 patch_prober.go:28] interesting pod/router-default-5444994796-hsqjr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 27 16:27:37 crc kubenswrapper[4751]: [-]has-synced failed: reason withheld Feb 27 16:27:37 crc kubenswrapper[4751]: [+]process-running ok Feb 27 16:27:37 crc kubenswrapper[4751]: healthz check failed Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.776464 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsqjr" podUID="51bdc38f-9b69-437f-9a17-edb963fb01ab" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.776692 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k4pxk" event={"ID":"578d81ae-01aa-4cc7-bdee-de283490661d","Type":"ContainerStarted","Data":"6f30afdfc8c33f7585171773f7b018ef75d2f614b05d1f3d710567164464368f"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 
16:27:37.777187 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k4pxk" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.795220 4751 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-k4pxk container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body= Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.795495 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k4pxk" podUID="578d81ae-01aa-4cc7-bdee-de283490661d" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.799482 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:37 crc kubenswrapper[4751]: E0227 16:27:37.801376 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:38.301364491 +0000 UTC m=+220.448378938 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.827477 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vrmbr" podStartSLOduration=175.827454025 podStartE2EDuration="2m55.827454025s" podCreationTimestamp="2026-02-27 16:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:37.741827811 +0000 UTC m=+219.888842248" watchObservedRunningTime="2026-02-27 16:27:37.827454025 +0000 UTC m=+219.974468472" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.828001 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k4pxk" podStartSLOduration=175.827996569 podStartE2EDuration="2m55.827996569s" podCreationTimestamp="2026-02-27 16:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:37.809756011 +0000 UTC m=+219.956770458" watchObservedRunningTime="2026-02-27 16:27:37.827996569 +0000 UTC m=+219.975011016" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.849068 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" event={"ID":"7c6775c9-9034-4be4-8002-201a25a35eab","Type":"ContainerStarted","Data":"c10063bab7714cd26bc642eaeb2592e5c0b500127ca3799e25fca96db5302335"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.881490 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.916991 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:37 crc kubenswrapper[4751]: E0227 16:27:37.918117 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:38.418099251 +0000 UTC m=+220.565113698 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.958900 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-297f4" event={"ID":"a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95","Type":"ContainerStarted","Data":"45883ba3ac6e37f3e2f613498e8174c9f01655f4f55e809f8855584c138a6c70"} Feb 27 16:27:37 crc kubenswrapper[4751]: I0227 16:27:37.984828 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-p4n6h" event={"ID":"daf0456f-25d3-4dee-8eac-49f0056aa251","Type":"ContainerStarted","Data":"18eee02002998e8a0cae1cd24bb8eff208262fd3a634fe479795d3855a3b4e70"} Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.010725 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-8fhbk" event={"ID":"714d38ad-6a1e-4d9d-98ce-22a8582a6429","Type":"ContainerStarted","Data":"b4380a4a00ba2bc596117fd319896d9ff57c85b6b5ef4da3becaa4e8c483d87c"} Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.010765 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-5f8sr" Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.023359 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:38 crc kubenswrapper[4751]: E0227 16:27:38.023711 4751 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:38.523700179 +0000 UTC m=+220.670714616 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.024117 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.028068 4751 patch_prober.go:28] interesting pod/downloads-7954f5f757-5f8sr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.18:8080/\": dial tcp 10.217.0.18:8080: connect: connection refused" start-of-body= Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.028119 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-5f8sr" podUID="ee5f53d5-2c38-465d-ad33-d5c0c5eb3923" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.18:8080/\": dial tcp 10.217.0.18:8080: connect: connection refused" Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.069260 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-p4n6h" podStartSLOduration=176.069240963 podStartE2EDuration="2m56.069240963s" podCreationTimestamp="2026-02-27 16:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:38.06761189 +0000 UTC m=+220.214626337" watchObservedRunningTime="2026-02-27 16:27:38.069240963 +0000 UTC m=+220.216255410" Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.090946 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.090996 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.122604 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-8fhbk" podStartSLOduration=177.122587781 podStartE2EDuration="2m57.122587781s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:38.121972585 +0000 UTC m=+220.268987032" watchObservedRunningTime="2026-02-27 16:27:38.122587781 +0000 UTC m=+220.269602228" Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.124652 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:38 crc kubenswrapper[4751]: E0227 16:27:38.125900 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:38.625884767 +0000 UTC m=+220.772899204 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.226526 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:38 crc kubenswrapper[4751]: E0227 16:27:38.237541 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:38.737525964 +0000 UTC m=+220.884540411 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.330939 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:38 crc kubenswrapper[4751]: E0227 16:27:38.331498 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:38.831483707 +0000 UTC m=+220.978498154 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.422328 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-4bnbv"] Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.434836 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:38 crc kubenswrapper[4751]: E0227 16:27:38.435203 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:38.935186575 +0000 UTC m=+221.082201022 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.535478 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:38 crc kubenswrapper[4751]: E0227 16:27:38.535645 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:39.035606317 +0000 UTC m=+221.182620764 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.536022 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:38 crc kubenswrapper[4751]: E0227 16:27:38.536420 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:39.036390148 +0000 UTC m=+221.183404595 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.545655 4751 patch_prober.go:28] interesting pod/router-default-5444994796-hsqjr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 27 16:27:38 crc kubenswrapper[4751]: [-]has-synced failed: reason withheld Feb 27 16:27:38 crc kubenswrapper[4751]: [+]process-running ok Feb 27 16:27:38 crc kubenswrapper[4751]: healthz check failed Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.545702 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsqjr" podUID="51bdc38f-9b69-437f-9a17-edb963fb01ab" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.547793 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:38 crc kubenswrapper[4751]: W0227 16:27:38.601427 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-9891ae10af0da22eb1a31c3d47b98013ad1a80c9ae0c03d4a2ff03eb544807ef WatchSource:0}: Error finding container 9891ae10af0da22eb1a31c3d47b98013ad1a80c9ae0c03d4a2ff03eb544807ef: Status 404 returned error can't find the container with id 9891ae10af0da22eb1a31c3d47b98013ad1a80c9ae0c03d4a2ff03eb544807ef Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.642864 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:38 crc kubenswrapper[4751]: E0227 16:27:38.643348 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:39.143327771 +0000 UTC m=+221.290342218 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.643494 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:38 crc kubenswrapper[4751]: E0227 16:27:38.643889 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:39.143876925 +0000 UTC m=+221.290891372 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.745028 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:38 crc kubenswrapper[4751]: E0227 16:27:38.745327 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:39.245308144 +0000 UTC m=+221.392322591 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.745388 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:38 crc kubenswrapper[4751]: E0227 16:27:38.745774 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:39.245758916 +0000 UTC m=+221.392773363 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.846200 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:38 crc kubenswrapper[4751]: E0227 16:27:38.846526 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:39.346510577 +0000 UTC m=+221.493525014 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:38 crc kubenswrapper[4751]: I0227 16:27:38.947246 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:38 crc kubenswrapper[4751]: E0227 16:27:38.947891 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:39.447878744 +0000 UTC m=+221.594893191 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.048894 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:39 crc kubenswrapper[4751]: E0227 16:27:39.049199 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:39.549182339 +0000 UTC m=+221.696196786 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.066169 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" event={"ID":"7da183a7-dcda-4e22-b135-b1ef0d593811","Type":"ContainerStarted","Data":"28688a5601f7fee3ac01a83c19d3116bcce63b2c6d7257cdf15819979b888511"} Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.080707 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wwdf6" event={"ID":"4b967e25-f884-4c9b-8307-cd4b669bbf76","Type":"ContainerStarted","Data":"3dd9345f18d863bea46fcd131d4328efd97edd8c53e84b3f2854dd6ae77197c2"} Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.087147 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-knf9q" event={"ID":"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70","Type":"ContainerStarted","Data":"4118cad3cd1453aee0cd93571084ed066314a479a0a6c53980e582b621d17d5c"} Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.140512 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-7hctb" event={"ID":"ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3","Type":"ContainerStarted","Data":"363f3b62151d289f67dffb68adec1fbac5083a3b8cc5b26e5d34f1dca0a7b475"} Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.151235 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:39 crc kubenswrapper[4751]: E0227 16:27:39.152329 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:39.652314671 +0000 UTC m=+221.799329128 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.157883 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2c2zk" event={"ID":"ef47b0ab-908a-4d99-9517-32a5984070fb","Type":"ContainerStarted","Data":"1c1cf6874d3832f331e744e27455c60d3b618b65a3123cbc473acce84fb233b8"} Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.158487 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2c2zk" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.169177 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-r87pw" event={"ID":"e03572cc-cf26-4ed2-bb88-ae6d3150b904","Type":"ContainerStarted","Data":"0a70373d9f06684a55ceaa97dffa1456011249937c62dc2e4473fd82f1ebcce7"} Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.177912 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sk82j" event={"ID":"50c130e9-87b3-43aa-a620-8faefd1add54","Type":"ContainerStarted","Data":"cea8d7e1007934e1879edf1185a32291efa93f859d9f98c037386045be56178f"} Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.193952 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wwdf6" podStartSLOduration=178.193925972 podStartE2EDuration="2m58.193925972s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:39.108626607 +0000 UTC m=+221.255641054" watchObservedRunningTime="2026-02-27 16:27:39.193925972 +0000 UTC m=+221.340940429" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.195217 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-7hctb" podStartSLOduration=177.195209606 podStartE2EDuration="2m57.195209606s" podCreationTimestamp="2026-02-27 16:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:39.180083439 +0000 UTC m=+221.327097886" watchObservedRunningTime="2026-02-27 16:27:39.195209606 +0000 UTC m=+221.342224053" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.208797 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"9b9995f840a5cf5b043942977998ecaf2b38e0874e01696c0c895d8895c7f417"} Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.216742 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-vf5cg" event={"ID":"7c1d7708-439d-4d51-ab68-0a2058480646","Type":"ContainerStarted","Data":"58ac94e2227decbd00df1845cd76901dd063a5803afd1d86e53a2ee8dec41979"} Feb 
27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.219107 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-vf5cg" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.239108 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-hfzxd" event={"ID":"7b0a19d1-3c2c-444a-8b89-b417e7324b78","Type":"ContainerStarted","Data":"49a80f8956adbb04b68a3d6f85d7dd0b1d86f4510d6776d865c781c77b685aa1"} Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.254669 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:39 crc kubenswrapper[4751]: E0227 16:27:39.255721 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:39.755705471 +0000 UTC m=+221.902719918 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.260161 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vnzzq" event={"ID":"520e0111-144a-404b-8dee-61fb546bc717","Type":"ContainerStarted","Data":"26826833c7d21a319dfcee9b19430fc74713dfa8dc24047be1cb319fb1282413"} Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.275268 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-p4n6h" event={"ID":"daf0456f-25d3-4dee-8eac-49f0056aa251","Type":"ContainerStarted","Data":"7f725aec7cb51831947218d231c7d31f1fe5ef3ae8551b08fd75215b74dc6698"} Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.302078 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2c2zk" podStartSLOduration=178.302057836 podStartE2EDuration="2m58.302057836s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:39.248185564 +0000 UTC m=+221.395200011" watchObservedRunningTime="2026-02-27 16:27:39.302057836 +0000 UTC m=+221.449072283" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.302255 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sk82j" podStartSLOduration=177.302251931 podStartE2EDuration="2m57.302251931s" podCreationTimestamp="2026-02-27 16:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 
16:27:39.294449287 +0000 UTC m=+221.441463734" watchObservedRunningTime="2026-02-27 16:27:39.302251931 +0000 UTC m=+221.449266368" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.316840 4751 ???:1] "http: TLS handshake error from 192.168.126.11:33706: no serving certificate available for the kubelet" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.346415 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h46g5" event={"ID":"89171c06-8a67-420c-bb2e-0608ceb22697","Type":"ContainerStarted","Data":"041522e435f8029602e2f0b7bda1c1d003e5f9c1ca9b58c67b831c3caba09337"} Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.347085 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h46g5" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.354444 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jxspb" event={"ID":"ebd19e25-96f6-454f-9472-d504d15a4821","Type":"ContainerStarted","Data":"2076c9cec554e2a7f5cfe7e7b5bcf5899c8be4c19e3d6c8af09af32af9bd0f31"} Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.357250 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:39 crc kubenswrapper[4751]: E0227 16:27:39.358970 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:39.858957488 +0000 UTC m=+222.005971935 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.380876 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn7fm" event={"ID":"10b563d1-9d53-4755-9b40-4f907b6ea224","Type":"ContainerStarted","Data":"b35fc03ae35f7f242a93d1afe38573f433855c7e511597fc2bfc98fd547b2129"} Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.383911 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vnzzq" podStartSLOduration=177.383883671 podStartE2EDuration="2m57.383883671s" podCreationTimestamp="2026-02-27 16:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:39.333722346 +0000 UTC m=+221.480736783" watchObservedRunningTime="2026-02-27 16:27:39.383883671 +0000 UTC m=+221.530898118" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.390531 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6r57j" event={"ID":"03b7032d-fb47-4f5c-95b8-f69f58b65db7","Type":"ContainerStarted","Data":"cd212df288a265afa243a3a99b31a32ad80ba417318f17f35985c3cf80779e74"} Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.421028 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-hfzxd" podStartSLOduration=8.421014074 podStartE2EDuration="8.421014074s" podCreationTimestamp="2026-02-27 16:27:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:39.419805793 +0000 UTC m=+221.566820240" watchObservedRunningTime="2026-02-27 16:27:39.421014074 +0000 UTC m=+221.568028521" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.421334 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-vf5cg" podStartSLOduration=8.421330473 podStartE2EDuration="8.421330473s" podCreationTimestamp="2026-02-27 16:27:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:39.381987281 +0000 UTC m=+221.529001728" watchObservedRunningTime="2026-02-27 16:27:39.421330473 +0000 UTC m=+221.568344920" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.435217 4751 ???:1] "http: TLS handshake error from 192.168.126.11:33720: no serving certificate available for the kubelet" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.459466 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 
16:27:39.459781 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"4c96bdd021e89c36b6a8735aec04a5d457c2d13a3a3110c0c1e727e45d5ceaa3"} Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.460413 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:27:39 crc kubenswrapper[4751]: E0227 16:27:39.460694 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:39.960679404 +0000 UTC m=+222.107693851 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.482325 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"9891ae10af0da22eb1a31c3d47b98013ad1a80c9ae0c03d4a2ff03eb544807ef"} Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.493791 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6r57j" podStartSLOduration=177.493771982 podStartE2EDuration="2m57.493771982s" podCreationTimestamp="2026-02-27 16:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:39.491323227 +0000 UTC m=+221.638337674" watchObservedRunningTime="2026-02-27 16:27:39.493771982 +0000 UTC m=+221.640786429" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.524518 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gjlwd" event={"ID":"f3483f04-316b-403b-9117-b744e8bc5c3f","Type":"ContainerStarted","Data":"795c05ac9ba47e057dbb23291ea39981ab57533c569caa6a83634cff905f9b48"} Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.532668 4751 patch_prober.go:28] interesting pod/router-default-5444994796-hsqjr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 27 16:27:39 crc kubenswrapper[4751]: [-]has-synced failed: reason withheld Feb 27 16:27:39 crc kubenswrapper[4751]: [+]process-running ok Feb 27 16:27:39 crc kubenswrapper[4751]: healthz check failed Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.532726 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsqjr" podUID="51bdc38f-9b69-437f-9a17-edb963fb01ab" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.562087 4751 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:39 crc kubenswrapper[4751]: E0227 16:27:39.562366 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:40.062354689 +0000 UTC m=+222.209369136 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.563647 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-txmh7" event={"ID":"a74a9206-b910-48c2-8448-29baf6140688","Type":"ContainerStarted","Data":"6c466d2ad4c0be22a72f9e60e4d0b5e46afc224a3974375a0fcc33fb721dec8c"} Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.564985 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h46g5" podStartSLOduration=177.564970038 podStartE2EDuration="2m57.564970038s" podCreationTimestamp="2026-02-27 16:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:39.564329671 +0000 UTC m=+221.711344118" watchObservedRunningTime="2026-02-27 16:27:39.564970038 +0000 UTC m=+221.711984485" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.565428 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-qs2km" event={"ID":"d474cfdd-550b-471d-aea1-6bc3f8532fa5","Type":"ContainerStarted","Data":"40e4df41627d70e05b77f718af99817cbe4bd0c67f37c5b3a517c7736c97751a"} Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.567147 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-297f4" event={"ID":"a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95","Type":"ContainerStarted","Data":"c3ae26b5641b1b2cc6ad6ebb8f8b1f4ecbac3ab00069d2c14c376c6b23cd18f3"} Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.567167 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-297f4" event={"ID":"a36a1ad3-51ef-4fbd-b18c-6a5918e0ec95","Type":"ContainerStarted","Data":"bf3246915c3b18ced6668291dadb6188515584db7f1b46115d6f6d929023d00e"} Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.570261 4751 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-mw4mn container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" start-of-body= Feb 27 
16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.570303 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-mw4mn" podUID="6efd7ce0-9b49-4c51-accf-3efcfc1188e6" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.581770 4751 patch_prober.go:28] interesting pod/downloads-7954f5f757-5f8sr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.18:8080/\": dial tcp 10.217.0.18:8080: connect: connection refused" start-of-body= Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.581826 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-5f8sr" podUID="ee5f53d5-2c38-465d-ad33-d5c0c5eb3923" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.18:8080/\": dial tcp 10.217.0.18:8080: connect: connection refused" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.585103 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bfl2l" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.586221 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c458q" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.653975 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-7gs6x" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.661522 4751 ???:1] "http: TLS handshake error from 192.168.126.11:33736: no serving certificate available for the kubelet" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.662755 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:39 crc kubenswrapper[4751]: E0227 16:27:39.667832 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:40.167812223 +0000 UTC m=+222.314826670 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.690908 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k4pxk" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.748723 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jxspb" podStartSLOduration=177.748609941 podStartE2EDuration="2m57.748609941s" podCreationTimestamp="2026-02-27 16:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:39.611574479 +0000 UTC m=+221.758588936" watchObservedRunningTime="2026-02-27 16:27:39.748609941 +0000 UTC m=+221.895624388" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.766134 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:39 crc kubenswrapper[4751]: E0227 16:27:39.766447 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:40.266435799 +0000 UTC m=+222.413450246 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.813493 4751 ???:1] "http: TLS handshake error from 192.168.126.11:33740: no serving certificate available for the kubelet" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.840673 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-txmh7" podStartSLOduration=178.840659594 podStartE2EDuration="2m58.840659594s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:39.838908898 +0000 UTC m=+221.985923345" watchObservedRunningTime="2026-02-27 16:27:39.840659594 +0000 UTC m=+221.987674041" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.870295 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:39 crc kubenswrapper[4751]: E0227 16:27:39.870623 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:40.370607419 +0000 UTC m=+222.517621866 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.925368 4751 ???:1] "http: TLS handshake error from 192.168.126.11:33752: no serving certificate available for the kubelet" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.946530 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gjlwd" podStartSLOduration=178.946516989 podStartE2EDuration="2m58.946516989s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:39.893216422 +0000 UTC m=+222.040230869" watchObservedRunningTime="2026-02-27 16:27:39.946516989 +0000 UTC m=+222.093531436" Feb 27 16:27:39 crc kubenswrapper[4751]: I0227 16:27:39.972269 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:39 crc kubenswrapper[4751]: E0227 16:27:39.972602 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:40.472588702 +0000 UTC m=+222.619603149 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.018044 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-297f4" podStartSLOduration=178.018027743 podStartE2EDuration="2m58.018027743s" podCreationTimestamp="2026-02-27 16:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:39.988991052 +0000 UTC m=+222.136005489" watchObservedRunningTime="2026-02-27 16:27:40.018027743 +0000 UTC m=+222.165042190" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.043738 4751 ???:1] "http: TLS handshake error from 192.168.126.11:33764: no serving certificate available for the kubelet" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.067278 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn7fm" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.073917 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:40 crc kubenswrapper[4751]: E0227 16:27:40.074131 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:40.574114854 +0000 UTC m=+222.721129291 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.074166 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:40 crc kubenswrapper[4751]: E0227 16:27:40.074502 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:40.574392251 +0000 UTC m=+222.721406698 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.097525 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ksw46"] Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.106369 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ksw46" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.108373 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.118214 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ksw46"] Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.176035 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.176222 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c35558f-cd8a-4a04-baca-ea445d76b712-catalog-content\") pod \"certified-operators-ksw46\" (UID: \"1c35558f-cd8a-4a04-baca-ea445d76b712\") " pod="openshift-marketplace/certified-operators-ksw46" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.176291 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c35558f-cd8a-4a04-baca-ea445d76b712-utilities\") pod \"certified-operators-ksw46\" (UID: \"1c35558f-cd8a-4a04-baca-ea445d76b712\") " pod="openshift-marketplace/certified-operators-ksw46" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.176311 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4k6v\" (UniqueName: \"kubernetes.io/projected/1c35558f-cd8a-4a04-baca-ea445d76b712-kube-api-access-q4k6v\") pod \"certified-operators-ksw46\" (UID: \"1c35558f-cd8a-4a04-baca-ea445d76b712\") " pod="openshift-marketplace/certified-operators-ksw46" Feb 27 16:27:40 crc kubenswrapper[4751]: E0227 16:27:40.176377 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:40.676353043 +0000 UTC m=+222.823367490 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.213396 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-htn5q"] Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.222750 4751 ???:1] "http: TLS handshake error from 192.168.126.11:33768: no serving certificate available for the kubelet" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.244952 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk"] Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.277392 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c35558f-cd8a-4a04-baca-ea445d76b712-catalog-content\") pod \"certified-operators-ksw46\" (UID: \"1c35558f-cd8a-4a04-baca-ea445d76b712\") " pod="openshift-marketplace/certified-operators-ksw46" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.277473 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.277502 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c35558f-cd8a-4a04-baca-ea445d76b712-utilities\") pod \"certified-operators-ksw46\" (UID: \"1c35558f-cd8a-4a04-baca-ea445d76b712\") " pod="openshift-marketplace/certified-operators-ksw46" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.277519 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4k6v\" (UniqueName: \"kubernetes.io/projected/1c35558f-cd8a-4a04-baca-ea445d76b712-kube-api-access-q4k6v\") pod \"certified-operators-ksw46\" (UID: \"1c35558f-cd8a-4a04-baca-ea445d76b712\") " pod="openshift-marketplace/certified-operators-ksw46" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.278429 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c35558f-cd8a-4a04-baca-ea445d76b712-catalog-content\") pod \"certified-operators-ksw46\" (UID: \"1c35558f-cd8a-4a04-baca-ea445d76b712\") " pod="openshift-marketplace/certified-operators-ksw46" Feb 27 16:27:40 crc kubenswrapper[4751]: E0227 16:27:40.278639 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:40.778629274 +0000 UTC m=+222.925643721 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.278944 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c35558f-cd8a-4a04-baca-ea445d76b712-utilities\") pod \"certified-operators-ksw46\" (UID: \"1c35558f-cd8a-4a04-baca-ea445d76b712\") " pod="openshift-marketplace/certified-operators-ksw46" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.294130 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-94pcv"] Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.295009 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-94pcv" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.296868 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.305151 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-94pcv"] Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.313618 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4k6v\" (UniqueName: \"kubernetes.io/projected/1c35558f-cd8a-4a04-baca-ea445d76b712-kube-api-access-q4k6v\") pod \"certified-operators-ksw46\" (UID: \"1c35558f-cd8a-4a04-baca-ea445d76b712\") " pod="openshift-marketplace/certified-operators-ksw46" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.315904 4751 ???:1] "http: TLS handshake error from 192.168.126.11:33784: no serving certificate available for the kubelet" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.379276 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.379847 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5r66n\" (UniqueName: \"kubernetes.io/projected/7412acf1-544d-4fbb-a538-2071988c8ae1-kube-api-access-5r66n\") pod \"community-operators-94pcv\" (UID: \"7412acf1-544d-4fbb-a538-2071988c8ae1\") " pod="openshift-marketplace/community-operators-94pcv" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.379893 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7412acf1-544d-4fbb-a538-2071988c8ae1-utilities\") pod \"community-operators-94pcv\" (UID: \"7412acf1-544d-4fbb-a538-2071988c8ae1\") " pod="openshift-marketplace/community-operators-94pcv" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.379941 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" 
(UniqueName: \"kubernetes.io/empty-dir/7412acf1-544d-4fbb-a538-2071988c8ae1-catalog-content\") pod \"community-operators-94pcv\" (UID: \"7412acf1-544d-4fbb-a538-2071988c8ae1\") " pod="openshift-marketplace/community-operators-94pcv" Feb 27 16:27:40 crc kubenswrapper[4751]: E0227 16:27:40.380049 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:40.880033932 +0000 UTC m=+223.027048379 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.436560 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ksw46" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.481164 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7412acf1-544d-4fbb-a538-2071988c8ae1-utilities\") pod \"community-operators-94pcv\" (UID: \"7412acf1-544d-4fbb-a538-2071988c8ae1\") " pod="openshift-marketplace/community-operators-94pcv" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.481203 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.481281 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7412acf1-544d-4fbb-a538-2071988c8ae1-catalog-content\") pod \"community-operators-94pcv\" (UID: \"7412acf1-544d-4fbb-a538-2071988c8ae1\") " pod="openshift-marketplace/community-operators-94pcv" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.481324 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5r66n\" (UniqueName: \"kubernetes.io/projected/7412acf1-544d-4fbb-a538-2071988c8ae1-kube-api-access-5r66n\") pod \"community-operators-94pcv\" (UID: \"7412acf1-544d-4fbb-a538-2071988c8ae1\") " pod="openshift-marketplace/community-operators-94pcv" Feb 27 16:27:40 crc kubenswrapper[4751]: E0227 16:27:40.481621 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:40.981605595 +0000 UTC m=+223.128620042 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.481808 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7412acf1-544d-4fbb-a538-2071988c8ae1-catalog-content\") pod \"community-operators-94pcv\" (UID: \"7412acf1-544d-4fbb-a538-2071988c8ae1\") " pod="openshift-marketplace/community-operators-94pcv" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.482049 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7412acf1-544d-4fbb-a538-2071988c8ae1-utilities\") pod \"community-operators-94pcv\" (UID: \"7412acf1-544d-4fbb-a538-2071988c8ae1\") " pod="openshift-marketplace/community-operators-94pcv" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.490131 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-z4j9x"] Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.491147 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z4j9x" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.535531 4751 patch_prober.go:28] interesting pod/router-default-5444994796-hsqjr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 27 16:27:40 crc kubenswrapper[4751]: [-]has-synced failed: reason withheld Feb 27 16:27:40 crc kubenswrapper[4751]: [+]process-running ok Feb 27 16:27:40 crc kubenswrapper[4751]: healthz check failed Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.535582 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsqjr" podUID="51bdc38f-9b69-437f-9a17-edb963fb01ab" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.559951 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z4j9x"] Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.567695 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5r66n\" (UniqueName: \"kubernetes.io/projected/7412acf1-544d-4fbb-a538-2071988c8ae1-kube-api-access-5r66n\") pod \"community-operators-94pcv\" (UID: \"7412acf1-544d-4fbb-a538-2071988c8ae1\") " pod="openshift-marketplace/community-operators-94pcv" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.583981 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.584257 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"utilities\" (UniqueName: \"kubernetes.io/empty-dir/361c2acb-bff0-4874-b92e-56f883281f35-utilities\") pod \"certified-operators-z4j9x\" (UID: \"361c2acb-bff0-4874-b92e-56f883281f35\") " pod="openshift-marketplace/certified-operators-z4j9x" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.584328 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/361c2acb-bff0-4874-b92e-56f883281f35-catalog-content\") pod \"certified-operators-z4j9x\" (UID: \"361c2acb-bff0-4874-b92e-56f883281f35\") " pod="openshift-marketplace/certified-operators-z4j9x" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.584392 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7gnl\" (UniqueName: \"kubernetes.io/projected/361c2acb-bff0-4874-b92e-56f883281f35-kube-api-access-w7gnl\") pod \"certified-operators-z4j9x\" (UID: \"361c2acb-bff0-4874-b92e-56f883281f35\") " pod="openshift-marketplace/certified-operators-z4j9x" Feb 27 16:27:40 crc kubenswrapper[4751]: E0227 16:27:40.584549 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:41.084528913 +0000 UTC m=+223.231543360 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.626651 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-94pcv" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.629888 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-knf9q" event={"ID":"c3fb94cc-a0da-4ecb-b0ba-a4666e264e70","Type":"ContainerStarted","Data":"f13a1c8f95e8e577fe0c6be645d498a87fbefbcbe6e49c30c8dc4b26ec24fc99"} Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.648590 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h46g5" event={"ID":"89171c06-8a67-420c-bb2e-0608ceb22697","Type":"ContainerStarted","Data":"4ad6e293c3fc51e26225ba5b835d5a0c54f70623cde7786c6b17bce8c58959e7"} Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.669464 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-knf9q" podStartSLOduration=179.669447278 podStartE2EDuration="2m59.669447278s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:40.668315849 +0000 UTC m=+222.815330296" watchObservedRunningTime="2026-02-27 16:27:40.669447278 +0000 UTC m=+222.816461725" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.685521 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/361c2acb-bff0-4874-b92e-56f883281f35-catalog-content\") pod \"certified-operators-z4j9x\" (UID: \"361c2acb-bff0-4874-b92e-56f883281f35\") " pod="openshift-marketplace/certified-operators-z4j9x" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.685572 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.685618 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7gnl\" (UniqueName: \"kubernetes.io/projected/361c2acb-bff0-4874-b92e-56f883281f35-kube-api-access-w7gnl\") pod \"certified-operators-z4j9x\" (UID: \"361c2acb-bff0-4874-b92e-56f883281f35\") " pod="openshift-marketplace/certified-operators-z4j9x" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.685711 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/361c2acb-bff0-4874-b92e-56f883281f35-utilities\") pod \"certified-operators-z4j9x\" (UID: \"361c2acb-bff0-4874-b92e-56f883281f35\") " pod="openshift-marketplace/certified-operators-z4j9x" Feb 27 16:27:40 crc kubenswrapper[4751]: E0227 16:27:40.687087 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:41.187075721 +0000 UTC m=+223.334090168 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.687776 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/361c2acb-bff0-4874-b92e-56f883281f35-catalog-content\") pod \"certified-operators-z4j9x\" (UID: \"361c2acb-bff0-4874-b92e-56f883281f35\") " pod="openshift-marketplace/certified-operators-z4j9x" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.688343 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/361c2acb-bff0-4874-b92e-56f883281f35-utilities\") pod \"certified-operators-z4j9x\" (UID: \"361c2acb-bff0-4874-b92e-56f883281f35\") " pod="openshift-marketplace/certified-operators-z4j9x" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.720940 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-c9mg9"] Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.723320 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"9ec00ec5be489c43b1f335230e2013333af3fbb354240262e7b10cb11cf75db9"} Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.723424 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-c9mg9" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.735140 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7gnl\" (UniqueName: \"kubernetes.io/projected/361c2acb-bff0-4874-b92e-56f883281f35-kube-api-access-w7gnl\") pod \"certified-operators-z4j9x\" (UID: \"361c2acb-bff0-4874-b92e-56f883281f35\") " pod="openshift-marketplace/certified-operators-z4j9x" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.742088 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-c9mg9"] Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.751942 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"35a7f1fec8ccbe717ab07a24c2df3fa005a9fb8e936670614e88729f66067fc2"} Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.773417 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"14107601533e92dbf3adaf04f5530e57fa73d4e35ff872780218d85660221324"} Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.786371 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.786607 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed03128a-80cd-404b-991d-99f04fdab36e-utilities\") pod \"community-operators-c9mg9\" (UID: \"ed03128a-80cd-404b-991d-99f04fdab36e\") " pod="openshift-marketplace/community-operators-c9mg9" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.786635 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4xdf\" (UniqueName: \"kubernetes.io/projected/ed03128a-80cd-404b-991d-99f04fdab36e-kube-api-access-v4xdf\") pod \"community-operators-c9mg9\" (UID: \"ed03128a-80cd-404b-991d-99f04fdab36e\") " pod="openshift-marketplace/community-operators-c9mg9" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.786702 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed03128a-80cd-404b-991d-99f04fdab36e-catalog-content\") pod \"community-operators-c9mg9\" (UID: \"ed03128a-80cd-404b-991d-99f04fdab36e\") " pod="openshift-marketplace/community-operators-c9mg9" Feb 27 16:27:40 crc kubenswrapper[4751]: E0227 16:27:40.786803 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:41.286790154 +0000 UTC m=+223.433804601 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.791374 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-r87pw" event={"ID":"e03572cc-cf26-4ed2-bb88-ae6d3150b904","Type":"ContainerStarted","Data":"fa6737149a28fdf06eafe42cb364cc2aad0375495d0fe672bc6fbc75bef744e6"} Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.818708 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z4j9x" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.822798 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" event={"ID":"7da183a7-dcda-4e22-b135-b1ef0d593811","Type":"ContainerStarted","Data":"8ae11f67d8aa80ca18ba37853054a6e03517c7f0ba72d03b6c4e5a6bb4143c59"} Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.822829 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-4bnbv" event={"ID":"7da183a7-dcda-4e22-b135-b1ef0d593811","Type":"ContainerStarted","Data":"bd972804e017632e1e0f1a70f694f209a0d5e26c3990a88cfe13e12428dfbb68"} Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.826063 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" podUID="8de82997-0daf-469d-ba5e-23fcaaa04614" containerName="controller-manager" containerID="cri-o://b630a4a274603049de306a73a25b32e5680481bbb3d1b78236d650b0bbe4c5a0" gracePeriod=30 Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.831501 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-r87pw" podStartSLOduration=178.831486006 podStartE2EDuration="2m58.831486006s" podCreationTimestamp="2026-02-27 16:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:40.83089762 +0000 UTC m=+222.977912067" watchObservedRunningTime="2026-02-27 16:27:40.831486006 +0000 UTC m=+222.978500453" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.879944 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-4bnbv" podStartSLOduration=179.879928446 podStartE2EDuration="2m59.879928446s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:40.855594798 +0000 UTC m=+223.002609245" watchObservedRunningTime="2026-02-27 16:27:40.879928446 +0000 UTC m=+223.026942893" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.887691 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed03128a-80cd-404b-991d-99f04fdab36e-utilities\") pod \"community-operators-c9mg9\" (UID: \"ed03128a-80cd-404b-991d-99f04fdab36e\") " 
pod="openshift-marketplace/community-operators-c9mg9" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.887804 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4xdf\" (UniqueName: \"kubernetes.io/projected/ed03128a-80cd-404b-991d-99f04fdab36e-kube-api-access-v4xdf\") pod \"community-operators-c9mg9\" (UID: \"ed03128a-80cd-404b-991d-99f04fdab36e\") " pod="openshift-marketplace/community-operators-c9mg9" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.887827 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.888049 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed03128a-80cd-404b-991d-99f04fdab36e-catalog-content\") pod \"community-operators-c9mg9\" (UID: \"ed03128a-80cd-404b-991d-99f04fdab36e\") " pod="openshift-marketplace/community-operators-c9mg9" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.888757 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ksw46"] Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.892086 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed03128a-80cd-404b-991d-99f04fdab36e-catalog-content\") pod \"community-operators-c9mg9\" (UID: \"ed03128a-80cd-404b-991d-99f04fdab36e\") " pod="openshift-marketplace/community-operators-c9mg9" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.892559 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed03128a-80cd-404b-991d-99f04fdab36e-utilities\") pod \"community-operators-c9mg9\" (UID: \"ed03128a-80cd-404b-991d-99f04fdab36e\") " pod="openshift-marketplace/community-operators-c9mg9" Feb 27 16:27:40 crc kubenswrapper[4751]: E0227 16:27:40.893878 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:41.393865531 +0000 UTC m=+223.540879978 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.925493 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4xdf\" (UniqueName: \"kubernetes.io/projected/ed03128a-80cd-404b-991d-99f04fdab36e-kube-api-access-v4xdf\") pod \"community-operators-c9mg9\" (UID: \"ed03128a-80cd-404b-991d-99f04fdab36e\") " pod="openshift-marketplace/community-operators-c9mg9" Feb 27 16:27:40 crc kubenswrapper[4751]: I0227 16:27:40.992021 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:40 crc kubenswrapper[4751]: E0227 16:27:40.992586 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:41.492571298 +0000 UTC m=+223.639585745 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.060764 4751 ???:1] "http: TLS handshake error from 192.168.126.11:33798: no serving certificate available for the kubelet" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.071001 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-c9mg9" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.096190 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:41 crc kubenswrapper[4751]: E0227 16:27:41.096481 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:41.596469412 +0000 UTC m=+223.743483859 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.205061 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:41 crc kubenswrapper[4751]: E0227 16:27:41.205327 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:41.705303184 +0000 UTC m=+223.852317631 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.205512 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:41 crc kubenswrapper[4751]: E0227 16:27:41.205879 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:41.705868889 +0000 UTC m=+223.852883336 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.223869 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-94pcv"] Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.306882 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:41 crc kubenswrapper[4751]: E0227 16:27:41.307096 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:41.807062782 +0000 UTC m=+223.954077229 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.307317 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:41 crc kubenswrapper[4751]: E0227 16:27:41.307625 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:41.807610886 +0000 UTC m=+223.954625333 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.339620 4751 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.410905 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:41 crc kubenswrapper[4751]: E0227 16:27:41.411295 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:41.911280093 +0000 UTC m=+224.058294530 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.512271 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:41 crc kubenswrapper[4751]: E0227 16:27:41.512960 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:42.012944528 +0000 UTC m=+224.159958975 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.541586 4751 patch_prober.go:28] interesting pod/router-default-5444994796-hsqjr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 27 16:27:41 crc kubenswrapper[4751]: [-]has-synced failed: reason withheld Feb 27 16:27:41 crc kubenswrapper[4751]: [+]process-running ok Feb 27 16:27:41 crc kubenswrapper[4751]: healthz check failed Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.541633 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsqjr" podUID="51bdc38f-9b69-437f-9a17-edb963fb01ab" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.581605 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.623319 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8de82997-0daf-469d-ba5e-23fcaaa04614-proxy-ca-bundles\") pod \"8de82997-0daf-469d-ba5e-23fcaaa04614\" (UID: \"8de82997-0daf-469d-ba5e-23fcaaa04614\") " Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.623365 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-grvjx\" (UniqueName: \"kubernetes.io/projected/8de82997-0daf-469d-ba5e-23fcaaa04614-kube-api-access-grvjx\") pod \"8de82997-0daf-469d-ba5e-23fcaaa04614\" (UID: \"8de82997-0daf-469d-ba5e-23fcaaa04614\") " Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.623505 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.623533 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8de82997-0daf-469d-ba5e-23fcaaa04614-client-ca\") pod \"8de82997-0daf-469d-ba5e-23fcaaa04614\" (UID: \"8de82997-0daf-469d-ba5e-23fcaaa04614\") " Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.623564 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8de82997-0daf-469d-ba5e-23fcaaa04614-config\") pod \"8de82997-0daf-469d-ba5e-23fcaaa04614\" (UID: \"8de82997-0daf-469d-ba5e-23fcaaa04614\") " Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.623632 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/8de82997-0daf-469d-ba5e-23fcaaa04614-serving-cert\") pod \"8de82997-0daf-469d-ba5e-23fcaaa04614\" (UID: \"8de82997-0daf-469d-ba5e-23fcaaa04614\") " Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.625521 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-59668987f7-k9dmx"] Feb 27 16:27:41 crc kubenswrapper[4751]: E0227 16:27:41.625811 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8de82997-0daf-469d-ba5e-23fcaaa04614" containerName="controller-manager" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.625827 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="8de82997-0daf-469d-ba5e-23fcaaa04614" containerName="controller-manager" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.625948 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="8de82997-0daf-469d-ba5e-23fcaaa04614" containerName="controller-manager" Feb 27 16:27:41 crc kubenswrapper[4751]: E0227 16:27:41.626268 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:42.126245158 +0000 UTC m=+224.273259605 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.626312 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.626470 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-c9mg9"] Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.626747 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8de82997-0daf-469d-ba5e-23fcaaa04614-client-ca" (OuterVolumeSpecName: "client-ca") pod "8de82997-0daf-469d-ba5e-23fcaaa04614" (UID: "8de82997-0daf-469d-ba5e-23fcaaa04614"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.626799 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8de82997-0daf-469d-ba5e-23fcaaa04614-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "8de82997-0daf-469d-ba5e-23fcaaa04614" (UID: "8de82997-0daf-469d-ba5e-23fcaaa04614"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.627091 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8de82997-0daf-469d-ba5e-23fcaaa04614-config" (OuterVolumeSpecName: "config") pod "8de82997-0daf-469d-ba5e-23fcaaa04614" (UID: "8de82997-0daf-469d-ba5e-23fcaaa04614"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.652066 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-59668987f7-k9dmx"] Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.670979 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8de82997-0daf-469d-ba5e-23fcaaa04614-kube-api-access-grvjx" (OuterVolumeSpecName: "kube-api-access-grvjx") pod "8de82997-0daf-469d-ba5e-23fcaaa04614" (UID: "8de82997-0daf-469d-ba5e-23fcaaa04614"). InnerVolumeSpecName "kube-api-access-grvjx". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.676419 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8de82997-0daf-469d-ba5e-23fcaaa04614-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8de82997-0daf-469d-ba5e-23fcaaa04614" (UID: "8de82997-0daf-469d-ba5e-23fcaaa04614"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:27:41 crc kubenswrapper[4751]: W0227 16:27:41.679592 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poded03128a_80cd_404b_991d_99f04fdab36e.slice/crio-42150a2aebab6e59384ccbe9a8a0cff9801983fe556bb6bac3073c9de588394a WatchSource:0}: Error finding container 42150a2aebab6e59384ccbe9a8a0cff9801983fe556bb6bac3073c9de588394a: Status 404 returned error can't find the container with id 42150a2aebab6e59384ccbe9a8a0cff9801983fe556bb6bac3073c9de588394a Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.724838 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dd7ada9-9067-4084-b1e2-145e8bca15a0-config\") pod \"controller-manager-59668987f7-k9dmx\" (UID: \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\") " pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.725227 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mb7hx\" (UniqueName: \"kubernetes.io/projected/9dd7ada9-9067-4084-b1e2-145e8bca15a0-kube-api-access-mb7hx\") pod \"controller-manager-59668987f7-k9dmx\" (UID: \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\") " pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.725258 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9dd7ada9-9067-4084-b1e2-145e8bca15a0-client-ca\") pod \"controller-manager-59668987f7-k9dmx\" (UID: \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\") " pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.725275 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9dd7ada9-9067-4084-b1e2-145e8bca15a0-proxy-ca-bundles\") pod \"controller-manager-59668987f7-k9dmx\" (UID: \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\") " pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.725296 4751 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9dd7ada9-9067-4084-b1e2-145e8bca15a0-serving-cert\") pod \"controller-manager-59668987f7-k9dmx\" (UID: \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\") " pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.725329 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.725372 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8de82997-0daf-469d-ba5e-23fcaaa04614-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.725383 4751 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8de82997-0daf-469d-ba5e-23fcaaa04614-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.725392 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-grvjx\" (UniqueName: \"kubernetes.io/projected/8de82997-0daf-469d-ba5e-23fcaaa04614-kube-api-access-grvjx\") on node \"crc\" DevicePath \"\"" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.725413 4751 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8de82997-0daf-469d-ba5e-23fcaaa04614-client-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.725421 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8de82997-0daf-469d-ba5e-23fcaaa04614-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:27:41 crc kubenswrapper[4751]: E0227 16:27:41.725729 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:42.225717245 +0000 UTC m=+224.372731692 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.826186 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.826389 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dd7ada9-9067-4084-b1e2-145e8bca15a0-config\") pod \"controller-manager-59668987f7-k9dmx\" (UID: \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\") " pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" Feb 27 16:27:41 crc kubenswrapper[4751]: E0227 16:27:41.826441 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-27 16:27:42.326417335 +0000 UTC m=+224.473431772 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.826463 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mb7hx\" (UniqueName: \"kubernetes.io/projected/9dd7ada9-9067-4084-b1e2-145e8bca15a0-kube-api-access-mb7hx\") pod \"controller-manager-59668987f7-k9dmx\" (UID: \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\") " pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.826490 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9dd7ada9-9067-4084-b1e2-145e8bca15a0-client-ca\") pod \"controller-manager-59668987f7-k9dmx\" (UID: \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\") " pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.826505 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9dd7ada9-9067-4084-b1e2-145e8bca15a0-proxy-ca-bundles\") pod \"controller-manager-59668987f7-k9dmx\" (UID: \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\") " pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.826526 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/9dd7ada9-9067-4084-b1e2-145e8bca15a0-serving-cert\") pod \"controller-manager-59668987f7-k9dmx\" (UID: \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\") " pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.826553 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:41 crc kubenswrapper[4751]: E0227 16:27:41.826870 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-27 16:27:42.326834266 +0000 UTC m=+224.473848713 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zjr9n" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.827856 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9dd7ada9-9067-4084-b1e2-145e8bca15a0-client-ca\") pod \"controller-manager-59668987f7-k9dmx\" (UID: \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\") " pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.828024 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dd7ada9-9067-4084-b1e2-145e8bca15a0-config\") pod \"controller-manager-59668987f7-k9dmx\" (UID: \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\") " pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.829577 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9dd7ada9-9067-4084-b1e2-145e8bca15a0-proxy-ca-bundles\") pod \"controller-manager-59668987f7-k9dmx\" (UID: \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\") " pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.836269 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9dd7ada9-9067-4084-b1e2-145e8bca15a0-serving-cert\") pod \"controller-manager-59668987f7-k9dmx\" (UID: \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\") " pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.837914 4751 generic.go:334] "Generic (PLEG): container finished" podID="1c35558f-cd8a-4a04-baca-ea445d76b712" containerID="fe924e01ba8e552fc1f993d5fc0a42e34974911c56b846c9015377650c69ed65" exitCode=0 Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.837973 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-ksw46" event={"ID":"1c35558f-cd8a-4a04-baca-ea445d76b712","Type":"ContainerDied","Data":"fe924e01ba8e552fc1f993d5fc0a42e34974911c56b846c9015377650c69ed65"} Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.838002 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksw46" event={"ID":"1c35558f-cd8a-4a04-baca-ea445d76b712","Type":"ContainerStarted","Data":"b4517f5fa75ca995852be11bc16546544d1b7baf029f9682733d6b2cdaa21274"} Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.840375 4751 generic.go:334] "Generic (PLEG): container finished" podID="8de82997-0daf-469d-ba5e-23fcaaa04614" containerID="b630a4a274603049de306a73a25b32e5680481bbb3d1b78236d650b0bbe4c5a0" exitCode=0 Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.840443 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" event={"ID":"8de82997-0daf-469d-ba5e-23fcaaa04614","Type":"ContainerDied","Data":"b630a4a274603049de306a73a25b32e5680481bbb3d1b78236d650b0bbe4c5a0"} Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.840458 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" event={"ID":"8de82997-0daf-469d-ba5e-23fcaaa04614","Type":"ContainerDied","Data":"4701c4676c18f2ff2b531b324c2ee0b7ef2bc8bccc9466ef74c96d76c80e9e35"} Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.840473 4751 scope.go:117] "RemoveContainer" containerID="b630a4a274603049de306a73a25b32e5680481bbb3d1b78236d650b0bbe4c5a0" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.840618 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-htn5q" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.848258 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mb7hx\" (UniqueName: \"kubernetes.io/projected/9dd7ada9-9067-4084-b1e2-145e8bca15a0-kube-api-access-mb7hx\") pod \"controller-manager-59668987f7-k9dmx\" (UID: \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\") " pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.882368 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-htn5q"] Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.893803 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-htn5q"] Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.896672 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-qs2km" event={"ID":"d474cfdd-550b-471d-aea1-6bc3f8532fa5","Type":"ContainerStarted","Data":"0871335b4e34a2c0dbaaf5f485bee2720949bd8f5bd99d061f3b42cced733abe"} Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.896708 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-qs2km" event={"ID":"d474cfdd-550b-471d-aea1-6bc3f8532fa5","Type":"ContainerStarted","Data":"1b82471bb3f6d17f0c27b045ecdaa903c7c9ac6e2c6794292a78d832eba3d899"} Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.896801 4751 reconciler.go:161] "OperationExecutor.RegisterPlugin started" 
plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-02-27T16:27:41.339649716Z","Handler":null,"Name":""} Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.898844 4751 scope.go:117] "RemoveContainer" containerID="b630a4a274603049de306a73a25b32e5680481bbb3d1b78236d650b0bbe4c5a0" Feb 27 16:27:41 crc kubenswrapper[4751]: E0227 16:27:41.899169 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b630a4a274603049de306a73a25b32e5680481bbb3d1b78236d650b0bbe4c5a0\": container with ID starting with b630a4a274603049de306a73a25b32e5680481bbb3d1b78236d650b0bbe4c5a0 not found: ID does not exist" containerID="b630a4a274603049de306a73a25b32e5680481bbb3d1b78236d650b0bbe4c5a0" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.899195 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b630a4a274603049de306a73a25b32e5680481bbb3d1b78236d650b0bbe4c5a0"} err="failed to get container status \"b630a4a274603049de306a73a25b32e5680481bbb3d1b78236d650b0bbe4c5a0\": rpc error: code = NotFound desc = could not find container \"b630a4a274603049de306a73a25b32e5680481bbb3d1b78236d650b0bbe4c5a0\": container with ID starting with b630a4a274603049de306a73a25b32e5680481bbb3d1b78236d650b0bbe4c5a0 not found: ID does not exist" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.900484 4751 generic.go:334] "Generic (PLEG): container finished" podID="2680126d-1cf3-4cbd-a130-3d8d0070a394" containerID="38f9d3e3e1e837826613fae5a0ad31b0cbb1d346cd35d3364c567c6b64b60399" exitCode=0 Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.900542 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536815-fc4ph" event={"ID":"2680126d-1cf3-4cbd-a130-3d8d0070a394","Type":"ContainerDied","Data":"38f9d3e3e1e837826613fae5a0ad31b0cbb1d346cd35d3364c567c6b64b60399"} Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.901939 4751 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.901966 4751 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.911954 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c9mg9" event={"ID":"ed03128a-80cd-404b-991d-99f04fdab36e","Type":"ContainerStarted","Data":"42150a2aebab6e59384ccbe9a8a0cff9801983fe556bb6bac3073c9de588394a"} Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.914078 4751 generic.go:334] "Generic (PLEG): container finished" podID="7412acf1-544d-4fbb-a538-2071988c8ae1" containerID="37d6324a08c50ef22c14f699bf90eb965bf368048332b99e50383ab8c0ad69b0" exitCode=0 Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.916286 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-94pcv" event={"ID":"7412acf1-544d-4fbb-a538-2071988c8ae1","Type":"ContainerDied","Data":"37d6324a08c50ef22c14f699bf90eb965bf368048332b99e50383ab8c0ad69b0"} Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.916328 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-94pcv" event={"ID":"7412acf1-544d-4fbb-a538-2071988c8ae1","Type":"ContainerStarted","Data":"298a8a1a0704ff36737e976a60cc129a06cc2f2b28a956480535e5c4e31b89bb"} Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.918646 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" podUID="7c6775c9-9034-4be4-8002-201a25a35eab" containerName="route-controller-manager" containerID="cri-o://c10063bab7714cd26bc642eaeb2592e5c0b500127ca3799e25fca96db5302335" gracePeriod=30 Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.928172 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.931549 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2c2zk" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.942970 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z4j9x"] Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.953625 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 27 16:27:41 crc kubenswrapper[4751]: I0227 16:27:41.976538 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.039593 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.046687 4751 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.046729 4751 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.076943 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zjr9n\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.094949 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-n8xtl"] Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.100669 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n8xtl" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.103076 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-n8xtl"] Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.103607 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.141020 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/317aef2b-3749-4a30-afc6-96f40516eae7-catalog-content\") pod \"redhat-marketplace-n8xtl\" (UID: \"317aef2b-3749-4a30-afc6-96f40516eae7\") " pod="openshift-marketplace/redhat-marketplace-n8xtl" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.141072 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26mdd\" (UniqueName: \"kubernetes.io/projected/317aef2b-3749-4a30-afc6-96f40516eae7-kube-api-access-26mdd\") pod \"redhat-marketplace-n8xtl\" (UID: \"317aef2b-3749-4a30-afc6-96f40516eae7\") " pod="openshift-marketplace/redhat-marketplace-n8xtl" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.141144 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/317aef2b-3749-4a30-afc6-96f40516eae7-utilities\") pod \"redhat-marketplace-n8xtl\" (UID: \"317aef2b-3749-4a30-afc6-96f40516eae7\") " pod="openshift-marketplace/redhat-marketplace-n8xtl" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.180317 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.181016 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.182768 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.182886 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.183144 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.242041 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/317aef2b-3749-4a30-afc6-96f40516eae7-catalog-content\") pod \"redhat-marketplace-n8xtl\" (UID: \"317aef2b-3749-4a30-afc6-96f40516eae7\") " pod="openshift-marketplace/redhat-marketplace-n8xtl" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.242086 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26mdd\" (UniqueName: \"kubernetes.io/projected/317aef2b-3749-4a30-afc6-96f40516eae7-kube-api-access-26mdd\") pod \"redhat-marketplace-n8xtl\" (UID: \"317aef2b-3749-4a30-afc6-96f40516eae7\") " pod="openshift-marketplace/redhat-marketplace-n8xtl" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.242124 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a1dd12d8-c2f3-44ea-97f2-aacff8b11867-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"a1dd12d8-c2f3-44ea-97f2-aacff8b11867\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.242149 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a1dd12d8-c2f3-44ea-97f2-aacff8b11867-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"a1dd12d8-c2f3-44ea-97f2-aacff8b11867\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.242173 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/317aef2b-3749-4a30-afc6-96f40516eae7-utilities\") pod \"redhat-marketplace-n8xtl\" (UID: \"317aef2b-3749-4a30-afc6-96f40516eae7\") " pod="openshift-marketplace/redhat-marketplace-n8xtl" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.242907 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/317aef2b-3749-4a30-afc6-96f40516eae7-utilities\") pod \"redhat-marketplace-n8xtl\" (UID: \"317aef2b-3749-4a30-afc6-96f40516eae7\") " pod="openshift-marketplace/redhat-marketplace-n8xtl" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.242950 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/317aef2b-3749-4a30-afc6-96f40516eae7-catalog-content\") pod \"redhat-marketplace-n8xtl\" (UID: \"317aef2b-3749-4a30-afc6-96f40516eae7\") " pod="openshift-marketplace/redhat-marketplace-n8xtl" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.258815 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26mdd\" (UniqueName: 
\"kubernetes.io/projected/317aef2b-3749-4a30-afc6-96f40516eae7-kube-api-access-26mdd\") pod \"redhat-marketplace-n8xtl\" (UID: \"317aef2b-3749-4a30-afc6-96f40516eae7\") " pod="openshift-marketplace/redhat-marketplace-n8xtl" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.363623 4751 ???:1] "http: TLS handshake error from 192.168.126.11:33812: no serving certificate available for the kubelet" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.374054 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.374279 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a1dd12d8-c2f3-44ea-97f2-aacff8b11867-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"a1dd12d8-c2f3-44ea-97f2-aacff8b11867\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.374325 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a1dd12d8-c2f3-44ea-97f2-aacff8b11867-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"a1dd12d8-c2f3-44ea-97f2-aacff8b11867\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.374865 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a1dd12d8-c2f3-44ea-97f2-aacff8b11867-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"a1dd12d8-c2f3-44ea-97f2-aacff8b11867\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.418204 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a1dd12d8-c2f3-44ea-97f2-aacff8b11867-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"a1dd12d8-c2f3-44ea-97f2-aacff8b11867\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.419941 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n8xtl" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.457893 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.489778 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-k5lsc"] Feb 27 16:27:42 crc kubenswrapper[4751]: E0227 16:27:42.490077 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c6775c9-9034-4be4-8002-201a25a35eab" containerName="route-controller-manager" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.490096 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c6775c9-9034-4be4-8002-201a25a35eab" containerName="route-controller-manager" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.490222 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c6775c9-9034-4be4-8002-201a25a35eab" containerName="route-controller-manager" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.491534 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-k5lsc" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.507017 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.508658 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-59668987f7-k9dmx"] Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.511603 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-k5lsc"] Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.537489 4751 patch_prober.go:28] interesting pod/router-default-5444994796-hsqjr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 27 16:27:42 crc kubenswrapper[4751]: [-]has-synced failed: reason withheld Feb 27 16:27:42 crc kubenswrapper[4751]: [+]process-running ok Feb 27 16:27:42 crc kubenswrapper[4751]: healthz check failed Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.537563 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsqjr" podUID="51bdc38f-9b69-437f-9a17-edb963fb01ab" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.559824 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8de82997-0daf-469d-ba5e-23fcaaa04614" path="/var/lib/kubelet/pods/8de82997-0daf-469d-ba5e-23fcaaa04614/volumes" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.561041 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.576744 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c6775c9-9034-4be4-8002-201a25a35eab-serving-cert\") pod \"7c6775c9-9034-4be4-8002-201a25a35eab\" (UID: \"7c6775c9-9034-4be4-8002-201a25a35eab\") " Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.576967 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c6775c9-9034-4be4-8002-201a25a35eab-config\") pod \"7c6775c9-9034-4be4-8002-201a25a35eab\" (UID: \"7c6775c9-9034-4be4-8002-201a25a35eab\") " Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.577091 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7c6775c9-9034-4be4-8002-201a25a35eab-client-ca\") pod \"7c6775c9-9034-4be4-8002-201a25a35eab\" (UID: \"7c6775c9-9034-4be4-8002-201a25a35eab\") " Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.577120 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bxfpx\" (UniqueName: \"kubernetes.io/projected/7c6775c9-9034-4be4-8002-201a25a35eab-kube-api-access-bxfpx\") pod \"7c6775c9-9034-4be4-8002-201a25a35eab\" (UID: \"7c6775c9-9034-4be4-8002-201a25a35eab\") " Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.577506 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/2d42be40-69b6-49a3-a4ad-ff74df0c284e-utilities\") pod \"redhat-marketplace-k5lsc\" (UID: \"2d42be40-69b6-49a3-a4ad-ff74df0c284e\") " pod="openshift-marketplace/redhat-marketplace-k5lsc" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.577984 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c6775c9-9034-4be4-8002-201a25a35eab-config" (OuterVolumeSpecName: "config") pod "7c6775c9-9034-4be4-8002-201a25a35eab" (UID: "7c6775c9-9034-4be4-8002-201a25a35eab"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.578007 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c6775c9-9034-4be4-8002-201a25a35eab-client-ca" (OuterVolumeSpecName: "client-ca") pod "7c6775c9-9034-4be4-8002-201a25a35eab" (UID: "7c6775c9-9034-4be4-8002-201a25a35eab"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.579484 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d42be40-69b6-49a3-a4ad-ff74df0c284e-catalog-content\") pod \"redhat-marketplace-k5lsc\" (UID: \"2d42be40-69b6-49a3-a4ad-ff74df0c284e\") " pod="openshift-marketplace/redhat-marketplace-k5lsc" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.579546 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkrts\" (UniqueName: \"kubernetes.io/projected/2d42be40-69b6-49a3-a4ad-ff74df0c284e-kube-api-access-gkrts\") pod \"redhat-marketplace-k5lsc\" (UID: \"2d42be40-69b6-49a3-a4ad-ff74df0c284e\") " pod="openshift-marketplace/redhat-marketplace-k5lsc" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.579620 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c6775c9-9034-4be4-8002-201a25a35eab-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.579630 4751 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7c6775c9-9034-4be4-8002-201a25a35eab-client-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.581424 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c6775c9-9034-4be4-8002-201a25a35eab-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7c6775c9-9034-4be4-8002-201a25a35eab" (UID: "7c6775c9-9034-4be4-8002-201a25a35eab"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.581516 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c6775c9-9034-4be4-8002-201a25a35eab-kube-api-access-bxfpx" (OuterVolumeSpecName: "kube-api-access-bxfpx") pod "7c6775c9-9034-4be4-8002-201a25a35eab" (UID: "7c6775c9-9034-4be4-8002-201a25a35eab"). InnerVolumeSpecName "kube-api-access-bxfpx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.679111 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-n8xtl"] Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.680446 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d42be40-69b6-49a3-a4ad-ff74df0c284e-catalog-content\") pod \"redhat-marketplace-k5lsc\" (UID: \"2d42be40-69b6-49a3-a4ad-ff74df0c284e\") " pod="openshift-marketplace/redhat-marketplace-k5lsc" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.680496 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkrts\" (UniqueName: \"kubernetes.io/projected/2d42be40-69b6-49a3-a4ad-ff74df0c284e-kube-api-access-gkrts\") pod \"redhat-marketplace-k5lsc\" (UID: \"2d42be40-69b6-49a3-a4ad-ff74df0c284e\") " pod="openshift-marketplace/redhat-marketplace-k5lsc" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.680554 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d42be40-69b6-49a3-a4ad-ff74df0c284e-utilities\") pod \"redhat-marketplace-k5lsc\" (UID: \"2d42be40-69b6-49a3-a4ad-ff74df0c284e\") " pod="openshift-marketplace/redhat-marketplace-k5lsc" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.680586 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bxfpx\" (UniqueName: \"kubernetes.io/projected/7c6775c9-9034-4be4-8002-201a25a35eab-kube-api-access-bxfpx\") on node \"crc\" DevicePath \"\"" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.680600 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c6775c9-9034-4be4-8002-201a25a35eab-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.680953 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d42be40-69b6-49a3-a4ad-ff74df0c284e-utilities\") pod \"redhat-marketplace-k5lsc\" (UID: \"2d42be40-69b6-49a3-a4ad-ff74df0c284e\") " pod="openshift-marketplace/redhat-marketplace-k5lsc" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.681151 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d42be40-69b6-49a3-a4ad-ff74df0c284e-catalog-content\") pod \"redhat-marketplace-k5lsc\" (UID: \"2d42be40-69b6-49a3-a4ad-ff74df0c284e\") " pod="openshift-marketplace/redhat-marketplace-k5lsc" Feb 27 16:27:42 crc kubenswrapper[4751]: W0227 16:27:42.699603 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod317aef2b_3749_4a30_afc6_96f40516eae7.slice/crio-efd45ad4a9384a2151cc2b916ccc1c9736c61b8d2928b3ad4c7eac7c7a12916a WatchSource:0}: Error finding container efd45ad4a9384a2151cc2b916ccc1c9736c61b8d2928b3ad4c7eac7c7a12916a: Status 404 returned error can't find the container with id efd45ad4a9384a2151cc2b916ccc1c9736c61b8d2928b3ad4c7eac7c7a12916a Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.700475 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkrts\" (UniqueName: \"kubernetes.io/projected/2d42be40-69b6-49a3-a4ad-ff74df0c284e-kube-api-access-gkrts\") pod \"redhat-marketplace-k5lsc\" 
(UID: \"2d42be40-69b6-49a3-a4ad-ff74df0c284e\") " pod="openshift-marketplace/redhat-marketplace-k5lsc" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.757590 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zjr9n"] Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.787944 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.826379 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-k5lsc" Feb 27 16:27:42 crc kubenswrapper[4751]: W0227 16:27:42.841115 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-poda1dd12d8_c2f3_44ea_97f2_aacff8b11867.slice/crio-95f87709d7156095b6b9a58abcdf28bb82cb52cf0dd0af6bff283188f99038d4 WatchSource:0}: Error finding container 95f87709d7156095b6b9a58abcdf28bb82cb52cf0dd0af6bff283188f99038d4: Status 404 returned error can't find the container with id 95f87709d7156095b6b9a58abcdf28bb82cb52cf0dd0af6bff283188f99038d4 Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.962433 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-qs2km" event={"ID":"d474cfdd-550b-471d-aea1-6bc3f8532fa5","Type":"ContainerStarted","Data":"eb383373f257cf2ef50829b3e2cce6bae94778b69ce423dd80a7135b8f7b6ea0"} Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.966204 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" event={"ID":"9dd7ada9-9067-4084-b1e2-145e8bca15a0","Type":"ContainerStarted","Data":"dfdefc8245f85b78f779a7b9edbdabc9ca29e76a0ba55001d46d6a73ddf5974d"} Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.966245 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" event={"ID":"9dd7ada9-9067-4084-b1e2-145e8bca15a0","Type":"ContainerStarted","Data":"a4acb5bbd4d189fe7945c05a3555a203ffdaae50895fbdf4850746cfddf92abb"} Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.966737 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.971269 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.971802 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" event={"ID":"3ffa275a-62dc-46f6-ae70-34b5758d918e","Type":"ContainerStarted","Data":"e46f4c3d3ae8c64c11cb456046b10b45573382b9edad25fb12c93f0c93336c3d"} Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.971824 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" event={"ID":"3ffa275a-62dc-46f6-ae70-34b5758d918e","Type":"ContainerStarted","Data":"5008ec15ce6fc74df003e64e3b87fbe5a34cce1834ba743f6fc5c74837d3b72b"} Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.972235 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.975267 4751 generic.go:334] "Generic (PLEG): container finished" 
podID="7c6775c9-9034-4be4-8002-201a25a35eab" containerID="c10063bab7714cd26bc642eaeb2592e5c0b500127ca3799e25fca96db5302335" exitCode=0 Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.975316 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" event={"ID":"7c6775c9-9034-4be4-8002-201a25a35eab","Type":"ContainerDied","Data":"c10063bab7714cd26bc642eaeb2592e5c0b500127ca3799e25fca96db5302335"} Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.975336 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" event={"ID":"7c6775c9-9034-4be4-8002-201a25a35eab","Type":"ContainerDied","Data":"bfd01c651815c23c54b27d9531b599311c5940a0916292d01b048782ae249664"} Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.975354 4751 scope.go:117] "RemoveContainer" containerID="c10063bab7714cd26bc642eaeb2592e5c0b500127ca3799e25fca96db5302335" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.975559 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.983678 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-qs2km" podStartSLOduration=11.983663488 podStartE2EDuration="11.983663488s" podCreationTimestamp="2026-02-27 16:27:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:42.980570537 +0000 UTC m=+225.127584984" watchObservedRunningTime="2026-02-27 16:27:42.983663488 +0000 UTC m=+225.130677935" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.997289 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" podStartSLOduration=181.997275045 podStartE2EDuration="3m1.997275045s" podCreationTimestamp="2026-02-27 16:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:42.99595966 +0000 UTC m=+225.142974107" watchObservedRunningTime="2026-02-27 16:27:42.997275045 +0000 UTC m=+225.144289482" Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.997375 4751 generic.go:334] "Generic (PLEG): container finished" podID="ed03128a-80cd-404b-991d-99f04fdab36e" containerID="455853a70d4b28a45fdd33d075329c0a36a935e54e42133306fd8bf0f8541d4b" exitCode=0 Feb 27 16:27:42 crc kubenswrapper[4751]: I0227 16:27:42.997448 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c9mg9" event={"ID":"ed03128a-80cd-404b-991d-99f04fdab36e","Type":"ContainerDied","Data":"455853a70d4b28a45fdd33d075329c0a36a935e54e42133306fd8bf0f8541d4b"} Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.001636 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"a1dd12d8-c2f3-44ea-97f2-aacff8b11867","Type":"ContainerStarted","Data":"95f87709d7156095b6b9a58abcdf28bb82cb52cf0dd0af6bff283188f99038d4"} Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.014660 4751 generic.go:334] "Generic (PLEG): container finished" podID="361c2acb-bff0-4874-b92e-56f883281f35" containerID="7b408989dbd455d0eaee07b63ffeca0b23f11c6e80fdf8904637b2f3ec27ecf1" 
exitCode=0 Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.014974 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z4j9x" event={"ID":"361c2acb-bff0-4874-b92e-56f883281f35","Type":"ContainerDied","Data":"7b408989dbd455d0eaee07b63ffeca0b23f11c6e80fdf8904637b2f3ec27ecf1"} Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.015000 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z4j9x" event={"ID":"361c2acb-bff0-4874-b92e-56f883281f35","Type":"ContainerStarted","Data":"6220ce410a4bab54dde7045c8dbc5c0bd1e2dce7fe4c0ccdf4caf7e261d71faf"} Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.019831 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" podStartSLOduration=3.019811305 podStartE2EDuration="3.019811305s" podCreationTimestamp="2026-02-27 16:27:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:43.014476565 +0000 UTC m=+225.161491012" watchObservedRunningTime="2026-02-27 16:27:43.019811305 +0000 UTC m=+225.166825752" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.043933 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n8xtl" event={"ID":"317aef2b-3749-4a30-afc6-96f40516eae7","Type":"ContainerStarted","Data":"efd45ad4a9384a2151cc2b916ccc1c9736c61b8d2928b3ad4c7eac7c7a12916a"} Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.075439 4751 scope.go:117] "RemoveContainer" containerID="c10063bab7714cd26bc642eaeb2592e5c0b500127ca3799e25fca96db5302335" Feb 27 16:27:43 crc kubenswrapper[4751]: E0227 16:27:43.080323 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c10063bab7714cd26bc642eaeb2592e5c0b500127ca3799e25fca96db5302335\": container with ID starting with c10063bab7714cd26bc642eaeb2592e5c0b500127ca3799e25fca96db5302335 not found: ID does not exist" containerID="c10063bab7714cd26bc642eaeb2592e5c0b500127ca3799e25fca96db5302335" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.080387 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c10063bab7714cd26bc642eaeb2592e5c0b500127ca3799e25fca96db5302335"} err="failed to get container status \"c10063bab7714cd26bc642eaeb2592e5c0b500127ca3799e25fca96db5302335\": rpc error: code = NotFound desc = could not find container \"c10063bab7714cd26bc642eaeb2592e5c0b500127ca3799e25fca96db5302335\": container with ID starting with c10063bab7714cd26bc642eaeb2592e5c0b500127ca3799e25fca96db5302335 not found: ID does not exist" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.134539 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk"] Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.146534 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-rczbk"] Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.329596 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-k5lsc"] Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.438991 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536815-fc4ph" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.481178 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-r48bt"] Feb 27 16:27:43 crc kubenswrapper[4751]: E0227 16:27:43.481466 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2680126d-1cf3-4cbd-a130-3d8d0070a394" containerName="collect-profiles" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.481482 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2680126d-1cf3-4cbd-a130-3d8d0070a394" containerName="collect-profiles" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.481601 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2680126d-1cf3-4cbd-a130-3d8d0070a394" containerName="collect-profiles" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.482341 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r48bt" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.484687 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.490880 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r48bt"] Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.517499 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2680126d-1cf3-4cbd-a130-3d8d0070a394-secret-volume\") pod \"2680126d-1cf3-4cbd-a130-3d8d0070a394\" (UID: \"2680126d-1cf3-4cbd-a130-3d8d0070a394\") " Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.517550 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bmcq7\" (UniqueName: \"kubernetes.io/projected/2680126d-1cf3-4cbd-a130-3d8d0070a394-kube-api-access-bmcq7\") pod \"2680126d-1cf3-4cbd-a130-3d8d0070a394\" (UID: \"2680126d-1cf3-4cbd-a130-3d8d0070a394\") " Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.517637 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2680126d-1cf3-4cbd-a130-3d8d0070a394-config-volume\") pod \"2680126d-1cf3-4cbd-a130-3d8d0070a394\" (UID: \"2680126d-1cf3-4cbd-a130-3d8d0070a394\") " Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.517838 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0-utilities\") pod \"redhat-operators-r48bt\" (UID: \"cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0\") " pod="openshift-marketplace/redhat-operators-r48bt" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.517881 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4nql\" (UniqueName: \"kubernetes.io/projected/cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0-kube-api-access-s4nql\") pod \"redhat-operators-r48bt\" (UID: \"cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0\") " pod="openshift-marketplace/redhat-operators-r48bt" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.517936 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0-catalog-content\") pod \"redhat-operators-r48bt\" (UID: \"cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0\") " pod="openshift-marketplace/redhat-operators-r48bt" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.519580 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2680126d-1cf3-4cbd-a130-3d8d0070a394-config-volume" (OuterVolumeSpecName: "config-volume") pod "2680126d-1cf3-4cbd-a130-3d8d0070a394" (UID: "2680126d-1cf3-4cbd-a130-3d8d0070a394"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.530799 4751 patch_prober.go:28] interesting pod/router-default-5444994796-hsqjr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 27 16:27:43 crc kubenswrapper[4751]: [-]has-synced failed: reason withheld Feb 27 16:27:43 crc kubenswrapper[4751]: [+]process-running ok Feb 27 16:27:43 crc kubenswrapper[4751]: healthz check failed Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.530851 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsqjr" podUID="51bdc38f-9b69-437f-9a17-edb963fb01ab" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.532538 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2680126d-1cf3-4cbd-a130-3d8d0070a394-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2680126d-1cf3-4cbd-a130-3d8d0070a394" (UID: "2680126d-1cf3-4cbd-a130-3d8d0070a394"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.535288 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2680126d-1cf3-4cbd-a130-3d8d0070a394-kube-api-access-bmcq7" (OuterVolumeSpecName: "kube-api-access-bmcq7") pod "2680126d-1cf3-4cbd-a130-3d8d0070a394" (UID: "2680126d-1cf3-4cbd-a130-3d8d0070a394"). InnerVolumeSpecName "kube-api-access-bmcq7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.618989 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0-catalog-content\") pod \"redhat-operators-r48bt\" (UID: \"cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0\") " pod="openshift-marketplace/redhat-operators-r48bt" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.619070 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0-utilities\") pod \"redhat-operators-r48bt\" (UID: \"cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0\") " pod="openshift-marketplace/redhat-operators-r48bt" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.619210 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4nql\" (UniqueName: \"kubernetes.io/projected/cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0-kube-api-access-s4nql\") pod \"redhat-operators-r48bt\" (UID: \"cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0\") " pod="openshift-marketplace/redhat-operators-r48bt" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.619301 4751 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2680126d-1cf3-4cbd-a130-3d8d0070a394-config-volume\") on node \"crc\" DevicePath \"\"" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.619318 4751 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2680126d-1cf3-4cbd-a130-3d8d0070a394-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.619327 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bmcq7\" (UniqueName: \"kubernetes.io/projected/2680126d-1cf3-4cbd-a130-3d8d0070a394-kube-api-access-bmcq7\") on node \"crc\" DevicePath \"\"" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.619529 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0-catalog-content\") pod \"redhat-operators-r48bt\" (UID: \"cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0\") " pod="openshift-marketplace/redhat-operators-r48bt" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.619581 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0-utilities\") pod \"redhat-operators-r48bt\" (UID: \"cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0\") " pod="openshift-marketplace/redhat-operators-r48bt" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.641856 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4nql\" (UniqueName: \"kubernetes.io/projected/cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0-kube-api-access-s4nql\") pod \"redhat-operators-r48bt\" (UID: \"cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0\") " pod="openshift-marketplace/redhat-operators-r48bt" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.856787 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-r48bt" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.881550 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nf7jn"] Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.882677 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nf7jn" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.892970 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nf7jn"] Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.921899 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5d14efb-d682-4728-846f-3b379fe8d390-catalog-content\") pod \"redhat-operators-nf7jn\" (UID: \"b5d14efb-d682-4728-846f-3b379fe8d390\") " pod="openshift-marketplace/redhat-operators-nf7jn" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.921998 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5vxn\" (UniqueName: \"kubernetes.io/projected/b5d14efb-d682-4728-846f-3b379fe8d390-kube-api-access-w5vxn\") pod \"redhat-operators-nf7jn\" (UID: \"b5d14efb-d682-4728-846f-3b379fe8d390\") " pod="openshift-marketplace/redhat-operators-nf7jn" Feb 27 16:27:43 crc kubenswrapper[4751]: I0227 16:27:43.922016 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5d14efb-d682-4728-846f-3b379fe8d390-utilities\") pod \"redhat-operators-nf7jn\" (UID: \"b5d14efb-d682-4728-846f-3b379fe8d390\") " pod="openshift-marketplace/redhat-operators-nf7jn" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.037639 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5d14efb-d682-4728-846f-3b379fe8d390-catalog-content\") pod \"redhat-operators-nf7jn\" (UID: \"b5d14efb-d682-4728-846f-3b379fe8d390\") " pod="openshift-marketplace/redhat-operators-nf7jn" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.037698 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.037733 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.037738 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5vxn\" (UniqueName: \"kubernetes.io/projected/b5d14efb-d682-4728-846f-3b379fe8d390-kube-api-access-w5vxn\") pod \"redhat-operators-nf7jn\" (UID: \"b5d14efb-d682-4728-846f-3b379fe8d390\") " pod="openshift-marketplace/redhat-operators-nf7jn" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.037764 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5d14efb-d682-4728-846f-3b379fe8d390-utilities\") pod \"redhat-operators-nf7jn\" (UID: \"b5d14efb-d682-4728-846f-3b379fe8d390\") " pod="openshift-marketplace/redhat-operators-nf7jn" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.038281 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/b5d14efb-d682-4728-846f-3b379fe8d390-utilities\") pod \"redhat-operators-nf7jn\" (UID: \"b5d14efb-d682-4728-846f-3b379fe8d390\") " pod="openshift-marketplace/redhat-operators-nf7jn" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.038352 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5d14efb-d682-4728-846f-3b379fe8d390-catalog-content\") pod \"redhat-operators-nf7jn\" (UID: \"b5d14efb-d682-4728-846f-3b379fe8d390\") " pod="openshift-marketplace/redhat-operators-nf7jn" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.060864 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.069785 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5vxn\" (UniqueName: \"kubernetes.io/projected/b5d14efb-d682-4728-846f-3b379fe8d390-kube-api-access-w5vxn\") pod \"redhat-operators-nf7jn\" (UID: \"b5d14efb-d682-4728-846f-3b379fe8d390\") " pod="openshift-marketplace/redhat-operators-nf7jn" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.089037 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536815-fc4ph" event={"ID":"2680126d-1cf3-4cbd-a130-3d8d0070a394","Type":"ContainerDied","Data":"981587c0e32f44253dd1949a9fdb4ccc4b9af02fcde65721f31b0bcce65393cc"} Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.089079 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="981587c0e32f44253dd1949a9fdb4ccc4b9af02fcde65721f31b0bcce65393cc" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.089170 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536815-fc4ph" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.093271 4751 generic.go:334] "Generic (PLEG): container finished" podID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" containerID="fee53ede9d76ef27573e797f0c26a17a20d334e8428a5b4704e8a97eacbffd9e" exitCode=0 Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.094908 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k5lsc" event={"ID":"2d42be40-69b6-49a3-a4ad-ff74df0c284e","Type":"ContainerDied","Data":"fee53ede9d76ef27573e797f0c26a17a20d334e8428a5b4704e8a97eacbffd9e"} Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.094937 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k5lsc" event={"ID":"2d42be40-69b6-49a3-a4ad-ff74df0c284e","Type":"ContainerStarted","Data":"78f8a4f08efc560bb6601710ac7dbb46305617b843ba325b6d453438372dbf3f"} Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.121168 4751 patch_prober.go:28] interesting pod/downloads-7954f5f757-5f8sr container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.18:8080/\": dial tcp 10.217.0.18:8080: connect: connection refused" start-of-body= Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.121487 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-5f8sr" podUID="ee5f53d5-2c38-465d-ad33-d5c0c5eb3923" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.18:8080/\": dial tcp 10.217.0.18:8080: connect: connection refused" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.121519 4751 patch_prober.go:28] interesting pod/downloads-7954f5f757-5f8sr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.18:8080/\": dial tcp 10.217.0.18:8080: connect: connection refused" start-of-body= Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.121721 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-5f8sr" podUID="ee5f53d5-2c38-465d-ad33-d5c0c5eb3923" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.18:8080/\": dial tcp 10.217.0.18:8080: connect: connection refused" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.139385 4751 generic.go:334] "Generic (PLEG): container finished" podID="a1dd12d8-c2f3-44ea-97f2-aacff8b11867" containerID="19a0ee9470e35decb26da79c135eba3f3980e1d78a70e92611d7679e3655dbf3" exitCode=0 Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.139497 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"a1dd12d8-c2f3-44ea-97f2-aacff8b11867","Type":"ContainerDied","Data":"19a0ee9470e35decb26da79c135eba3f3980e1d78a70e92611d7679e3655dbf3"} Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.160769 4751 generic.go:334] "Generic (PLEG): container finished" podID="317aef2b-3749-4a30-afc6-96f40516eae7" containerID="63a8255fdb0eb404763761e4e4ae89220717f807a19571c937fda7eb2e74680e" exitCode=0 Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.160835 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n8xtl" event={"ID":"317aef2b-3749-4a30-afc6-96f40516eae7","Type":"ContainerDied","Data":"63a8255fdb0eb404763761e4e4ae89220717f807a19571c937fda7eb2e74680e"} Feb 27 
16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.226891 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nf7jn" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.457753 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nf7jn"] Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.464742 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.464827 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.467310 4751 patch_prober.go:28] interesting pod/console-f9d7485db-hb87p container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.12:8443/health\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.467352 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-hb87p" podUID="ee39d7ed-b569-4c34-8c19-a5f386c85b5c" containerName="console" probeResult="failure" output="Get \"https://10.217.0.12:8443/health\": dial tcp 10.217.0.12:8443: connect: connection refused" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.495863 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r48bt"] Feb 27 16:27:44 crc kubenswrapper[4751]: W0227 16:27:44.498528 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb5d14efb_d682_4728_846f_3b379fe8d390.slice/crio-31a63462f83966616c04d1a59a8c549cd7eebd12e7c01513d90fcefec277e8fd WatchSource:0}: Error finding container 31a63462f83966616c04d1a59a8c549cd7eebd12e7c01513d90fcefec277e8fd: Status 404 returned error can't find the container with id 31a63462f83966616c04d1a59a8c549cd7eebd12e7c01513d90fcefec277e8fd Feb 27 16:27:44 crc kubenswrapper[4751]: W0227 16:27:44.504732 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcd22dcd4_2184_46b5_9c2f_ed1a65c64fd0.slice/crio-8bbef1ff2e3802c7153d56db6a1dc1340de7ff13a4208b50afa82225dd71f467 WatchSource:0}: Error finding container 8bbef1ff2e3802c7153d56db6a1dc1340de7ff13a4208b50afa82225dd71f467: Status 404 returned error can't find the container with id 8bbef1ff2e3802c7153d56db6a1dc1340de7ff13a4208b50afa82225dd71f467 Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.530163 4751 patch_prober.go:28] interesting pod/router-default-5444994796-hsqjr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 27 16:27:44 crc kubenswrapper[4751]: [-]has-synced failed: reason withheld Feb 27 16:27:44 crc kubenswrapper[4751]: [+]process-running ok Feb 27 16:27:44 crc kubenswrapper[4751]: healthz check failed Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.530234 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsqjr" podUID="51bdc38f-9b69-437f-9a17-edb963fb01ab" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.539814 4751 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c6775c9-9034-4be4-8002-201a25a35eab" path="/var/lib/kubelet/pods/7c6775c9-9034-4be4-8002-201a25a35eab/volumes" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.540338 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-hsqjr" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.558267 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn"] Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.559171 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.562819 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn"] Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.562999 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.563142 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.563196 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.563335 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.563580 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.563662 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.651332 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-mw4mn" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.750691 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdxnq\" (UniqueName: \"kubernetes.io/projected/1607c4d5-1595-4956-90aa-3e5231cb1725-kube-api-access-wdxnq\") pod \"route-controller-manager-78cb796998-crfmn\" (UID: \"1607c4d5-1595-4956-90aa-3e5231cb1725\") " pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.750734 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1607c4d5-1595-4956-90aa-3e5231cb1725-config\") pod \"route-controller-manager-78cb796998-crfmn\" (UID: \"1607c4d5-1595-4956-90aa-3e5231cb1725\") " pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.750858 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1607c4d5-1595-4956-90aa-3e5231cb1725-client-ca\") pod 
\"route-controller-manager-78cb796998-crfmn\" (UID: \"1607c4d5-1595-4956-90aa-3e5231cb1725\") " pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.751352 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1607c4d5-1595-4956-90aa-3e5231cb1725-serving-cert\") pod \"route-controller-manager-78cb796998-crfmn\" (UID: \"1607c4d5-1595-4956-90aa-3e5231cb1725\") " pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.852022 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1607c4d5-1595-4956-90aa-3e5231cb1725-client-ca\") pod \"route-controller-manager-78cb796998-crfmn\" (UID: \"1607c4d5-1595-4956-90aa-3e5231cb1725\") " pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.852063 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1607c4d5-1595-4956-90aa-3e5231cb1725-serving-cert\") pod \"route-controller-manager-78cb796998-crfmn\" (UID: \"1607c4d5-1595-4956-90aa-3e5231cb1725\") " pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.852121 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdxnq\" (UniqueName: \"kubernetes.io/projected/1607c4d5-1595-4956-90aa-3e5231cb1725-kube-api-access-wdxnq\") pod \"route-controller-manager-78cb796998-crfmn\" (UID: \"1607c4d5-1595-4956-90aa-3e5231cb1725\") " pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.852148 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1607c4d5-1595-4956-90aa-3e5231cb1725-config\") pod \"route-controller-manager-78cb796998-crfmn\" (UID: \"1607c4d5-1595-4956-90aa-3e5231cb1725\") " pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.853817 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1607c4d5-1595-4956-90aa-3e5231cb1725-client-ca\") pod \"route-controller-manager-78cb796998-crfmn\" (UID: \"1607c4d5-1595-4956-90aa-3e5231cb1725\") " pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.854725 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1607c4d5-1595-4956-90aa-3e5231cb1725-config\") pod \"route-controller-manager-78cb796998-crfmn\" (UID: \"1607c4d5-1595-4956-90aa-3e5231cb1725\") " pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.860170 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1607c4d5-1595-4956-90aa-3e5231cb1725-serving-cert\") pod \"route-controller-manager-78cb796998-crfmn\" (UID: \"1607c4d5-1595-4956-90aa-3e5231cb1725\") " 
pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.871642 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdxnq\" (UniqueName: \"kubernetes.io/projected/1607c4d5-1595-4956-90aa-3e5231cb1725-kube-api-access-wdxnq\") pod \"route-controller-manager-78cb796998-crfmn\" (UID: \"1607c4d5-1595-4956-90aa-3e5231cb1725\") " pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.876449 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" Feb 27 16:27:44 crc kubenswrapper[4751]: I0227 16:27:44.940353 4751 ???:1] "http: TLS handshake error from 192.168.126.11:33816: no serving certificate available for the kubelet" Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.199768 4751 generic.go:334] "Generic (PLEG): container finished" podID="cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" containerID="54db31796b55c0b7c86de429a4044167ed26177ee9d2e09d3cbaa0636b09a26e" exitCode=0 Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.200092 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r48bt" event={"ID":"cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0","Type":"ContainerDied","Data":"54db31796b55c0b7c86de429a4044167ed26177ee9d2e09d3cbaa0636b09a26e"} Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.200118 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r48bt" event={"ID":"cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0","Type":"ContainerStarted","Data":"8bbef1ff2e3802c7153d56db6a1dc1340de7ff13a4208b50afa82225dd71f467"} Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.213539 4751 generic.go:334] "Generic (PLEG): container finished" podID="b5d14efb-d682-4728-846f-3b379fe8d390" containerID="f58eac2c5701eabd37430918ddacbf31d945e58b18eda53efe05405b81a252eb" exitCode=0 Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.213592 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nf7jn" event={"ID":"b5d14efb-d682-4728-846f-3b379fe8d390","Type":"ContainerDied","Data":"f58eac2c5701eabd37430918ddacbf31d945e58b18eda53efe05405b81a252eb"} Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.213647 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nf7jn" event={"ID":"b5d14efb-d682-4728-846f-3b379fe8d390","Type":"ContainerStarted","Data":"31a63462f83966616c04d1a59a8c549cd7eebd12e7c01513d90fcefec277e8fd"} Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.220841 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-knf9q" Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.420650 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.430446 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.431696 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.433696 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.433872 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.453204 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn"] Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.550794 4751 patch_prober.go:28] interesting pod/router-default-5444994796-hsqjr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 27 16:27:45 crc kubenswrapper[4751]: [-]has-synced failed: reason withheld Feb 27 16:27:45 crc kubenswrapper[4751]: [+]process-running ok Feb 27 16:27:45 crc kubenswrapper[4751]: healthz check failed Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.550835 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsqjr" podUID="51bdc38f-9b69-437f-9a17-edb963fb01ab" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.572252 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4f6e1fa9-8ad5-445a-bd26-e7767999c24f-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"4f6e1fa9-8ad5-445a-bd26-e7767999c24f\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.572321 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4f6e1fa9-8ad5-445a-bd26-e7767999c24f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"4f6e1fa9-8ad5-445a-bd26-e7767999c24f\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.673321 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4f6e1fa9-8ad5-445a-bd26-e7767999c24f-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"4f6e1fa9-8ad5-445a-bd26-e7767999c24f\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.673663 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4f6e1fa9-8ad5-445a-bd26-e7767999c24f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"4f6e1fa9-8ad5-445a-bd26-e7767999c24f\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.677700 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4f6e1fa9-8ad5-445a-bd26-e7767999c24f-kubelet-dir\") 
pod \"revision-pruner-9-crc\" (UID: \"4f6e1fa9-8ad5-445a-bd26-e7767999c24f\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.694301 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4f6e1fa9-8ad5-445a-bd26-e7767999c24f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"4f6e1fa9-8ad5-445a-bd26-e7767999c24f\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.700564 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.787627 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.880029 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a1dd12d8-c2f3-44ea-97f2-aacff8b11867-kubelet-dir\") pod \"a1dd12d8-c2f3-44ea-97f2-aacff8b11867\" (UID: \"a1dd12d8-c2f3-44ea-97f2-aacff8b11867\") " Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.880116 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a1dd12d8-c2f3-44ea-97f2-aacff8b11867-kube-api-access\") pod \"a1dd12d8-c2f3-44ea-97f2-aacff8b11867\" (UID: \"a1dd12d8-c2f3-44ea-97f2-aacff8b11867\") " Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.880284 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a1dd12d8-c2f3-44ea-97f2-aacff8b11867-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "a1dd12d8-c2f3-44ea-97f2-aacff8b11867" (UID: "a1dd12d8-c2f3-44ea-97f2-aacff8b11867"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.881536 4751 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a1dd12d8-c2f3-44ea-97f2-aacff8b11867-kubelet-dir\") on node \"crc\" DevicePath \"\"" Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.883869 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a1dd12d8-c2f3-44ea-97f2-aacff8b11867-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "a1dd12d8-c2f3-44ea-97f2-aacff8b11867" (UID: "a1dd12d8-c2f3-44ea-97f2-aacff8b11867"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:27:45 crc kubenswrapper[4751]: I0227 16:27:45.982341 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a1dd12d8-c2f3-44ea-97f2-aacff8b11867-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 27 16:27:46 crc kubenswrapper[4751]: I0227 16:27:46.055074 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Feb 27 16:27:46 crc kubenswrapper[4751]: W0227 16:27:46.066838 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod4f6e1fa9_8ad5_445a_bd26_e7767999c24f.slice/crio-0b1c212c1f5f43991b6e8c5be0489a734a2feeb7ce95a93d4cd7ba2bfbc74211 WatchSource:0}: Error finding container 0b1c212c1f5f43991b6e8c5be0489a734a2feeb7ce95a93d4cd7ba2bfbc74211: Status 404 returned error can't find the container with id 0b1c212c1f5f43991b6e8c5be0489a734a2feeb7ce95a93d4cd7ba2bfbc74211 Feb 27 16:27:46 crc kubenswrapper[4751]: I0227 16:27:46.223708 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"4f6e1fa9-8ad5-445a-bd26-e7767999c24f","Type":"ContainerStarted","Data":"0b1c212c1f5f43991b6e8c5be0489a734a2feeb7ce95a93d4cd7ba2bfbc74211"} Feb 27 16:27:46 crc kubenswrapper[4751]: I0227 16:27:46.229145 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" event={"ID":"1607c4d5-1595-4956-90aa-3e5231cb1725","Type":"ContainerStarted","Data":"d22f49f1cb688ffb4147644985cf27c8bb29f53bb841161f0cf9c2a109f18be1"} Feb 27 16:27:46 crc kubenswrapper[4751]: I0227 16:27:46.229190 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" event={"ID":"1607c4d5-1595-4956-90aa-3e5231cb1725","Type":"ContainerStarted","Data":"08bf5fa5554e14458467a4bfefd8e346729812e37bbd1af7a9bcc52acc3a32db"} Feb 27 16:27:46 crc kubenswrapper[4751]: I0227 16:27:46.232865 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 27 16:27:46 crc kubenswrapper[4751]: I0227 16:27:46.232963 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"a1dd12d8-c2f3-44ea-97f2-aacff8b11867","Type":"ContainerDied","Data":"95f87709d7156095b6b9a58abcdf28bb82cb52cf0dd0af6bff283188f99038d4"} Feb 27 16:27:46 crc kubenswrapper[4751]: I0227 16:27:46.233001 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="95f87709d7156095b6b9a58abcdf28bb82cb52cf0dd0af6bff283188f99038d4" Feb 27 16:27:46 crc kubenswrapper[4751]: I0227 16:27:46.529271 4751 patch_prober.go:28] interesting pod/router-default-5444994796-hsqjr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 27 16:27:46 crc kubenswrapper[4751]: [-]has-synced failed: reason withheld Feb 27 16:27:46 crc kubenswrapper[4751]: [+]process-running ok Feb 27 16:27:46 crc kubenswrapper[4751]: healthz check failed Feb 27 16:27:46 crc kubenswrapper[4751]: I0227 16:27:46.529317 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsqjr" podUID="51bdc38f-9b69-437f-9a17-edb963fb01ab" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 27 16:27:46 crc kubenswrapper[4751]: I0227 16:27:46.738350 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-vf5cg" Feb 27 16:27:47 crc kubenswrapper[4751]: I0227 16:27:47.247029 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"4f6e1fa9-8ad5-445a-bd26-e7767999c24f","Type":"ContainerStarted","Data":"4adc7d8953c2de43ccb939801a4332662d2a7165077d0a91b4cb795f1080764e"} Feb 27 16:27:47 crc kubenswrapper[4751]: I0227 16:27:47.259194 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=2.259179137 podStartE2EDuration="2.259179137s" podCreationTimestamp="2026-02-27 16:27:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:47.25893237 +0000 UTC m=+229.405946817" watchObservedRunningTime="2026-02-27 16:27:47.259179137 +0000 UTC m=+229.406193584" Feb 27 16:27:47 crc kubenswrapper[4751]: I0227 16:27:47.278963 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" podStartSLOduration=7.278947315 podStartE2EDuration="7.278947315s" podCreationTimestamp="2026-02-27 16:27:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:27:47.274129979 +0000 UTC m=+229.421144426" watchObservedRunningTime="2026-02-27 16:27:47.278947315 +0000 UTC m=+229.425961762" Feb 27 16:27:47 crc kubenswrapper[4751]: I0227 16:27:47.416019 4751 ???:1] "http: TLS handshake error from 192.168.126.11:39914: no serving certificate available for the kubelet" Feb 27 16:27:47 crc kubenswrapper[4751]: I0227 16:27:47.528954 4751 patch_prober.go:28] interesting pod/router-default-5444994796-hsqjr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe 
failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 27 16:27:47 crc kubenswrapper[4751]: [-]has-synced failed: reason withheld Feb 27 16:27:47 crc kubenswrapper[4751]: [+]process-running ok Feb 27 16:27:47 crc kubenswrapper[4751]: healthz check failed Feb 27 16:27:47 crc kubenswrapper[4751]: I0227 16:27:47.529000 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsqjr" podUID="51bdc38f-9b69-437f-9a17-edb963fb01ab" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 27 16:27:48 crc kubenswrapper[4751]: I0227 16:27:48.269415 4751 generic.go:334] "Generic (PLEG): container finished" podID="4f6e1fa9-8ad5-445a-bd26-e7767999c24f" containerID="4adc7d8953c2de43ccb939801a4332662d2a7165077d0a91b4cb795f1080764e" exitCode=0 Feb 27 16:27:48 crc kubenswrapper[4751]: I0227 16:27:48.269452 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"4f6e1fa9-8ad5-445a-bd26-e7767999c24f","Type":"ContainerDied","Data":"4adc7d8953c2de43ccb939801a4332662d2a7165077d0a91b4cb795f1080764e"} Feb 27 16:27:48 crc kubenswrapper[4751]: I0227 16:27:48.536486 4751 patch_prober.go:28] interesting pod/router-default-5444994796-hsqjr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 27 16:27:48 crc kubenswrapper[4751]: [-]has-synced failed: reason withheld Feb 27 16:27:48 crc kubenswrapper[4751]: [+]process-running ok Feb 27 16:27:48 crc kubenswrapper[4751]: healthz check failed Feb 27 16:27:48 crc kubenswrapper[4751]: I0227 16:27:48.536581 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsqjr" podUID="51bdc38f-9b69-437f-9a17-edb963fb01ab" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 27 16:27:49 crc kubenswrapper[4751]: I0227 16:27:49.529691 4751 patch_prober.go:28] interesting pod/router-default-5444994796-hsqjr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 27 16:27:49 crc kubenswrapper[4751]: [-]has-synced failed: reason withheld Feb 27 16:27:49 crc kubenswrapper[4751]: [+]process-running ok Feb 27 16:27:49 crc kubenswrapper[4751]: healthz check failed Feb 27 16:27:49 crc kubenswrapper[4751]: I0227 16:27:49.529746 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsqjr" podUID="51bdc38f-9b69-437f-9a17-edb963fb01ab" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 27 16:27:50 crc kubenswrapper[4751]: I0227 16:27:50.081731 4751 ???:1] "http: TLS handshake error from 192.168.126.11:39930: no serving certificate available for the kubelet" Feb 27 16:27:50 crc kubenswrapper[4751]: I0227 16:27:50.527264 4751 patch_prober.go:28] interesting pod/router-default-5444994796-hsqjr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 27 16:27:50 crc kubenswrapper[4751]: [-]has-synced failed: reason withheld Feb 27 16:27:50 crc kubenswrapper[4751]: [+]process-running ok Feb 27 16:27:50 crc kubenswrapper[4751]: healthz check failed Feb 27 
16:27:50 crc kubenswrapper[4751]: I0227 16:27:50.527317 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsqjr" podUID="51bdc38f-9b69-437f-9a17-edb963fb01ab" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 27 16:27:51 crc kubenswrapper[4751]: I0227 16:27:51.528345 4751 patch_prober.go:28] interesting pod/router-default-5444994796-hsqjr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 27 16:27:51 crc kubenswrapper[4751]: [-]has-synced failed: reason withheld Feb 27 16:27:51 crc kubenswrapper[4751]: [+]process-running ok Feb 27 16:27:51 crc kubenswrapper[4751]: healthz check failed Feb 27 16:27:51 crc kubenswrapper[4751]: I0227 16:27:51.528427 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsqjr" podUID="51bdc38f-9b69-437f-9a17-edb963fb01ab" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 27 16:27:52 crc kubenswrapper[4751]: I0227 16:27:52.527975 4751 patch_prober.go:28] interesting pod/router-default-5444994796-hsqjr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 27 16:27:52 crc kubenswrapper[4751]: [-]has-synced failed: reason withheld Feb 27 16:27:52 crc kubenswrapper[4751]: [+]process-running ok Feb 27 16:27:52 crc kubenswrapper[4751]: healthz check failed Feb 27 16:27:52 crc kubenswrapper[4751]: I0227 16:27:52.528239 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsqjr" podUID="51bdc38f-9b69-437f-9a17-edb963fb01ab" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 27 16:27:53 crc kubenswrapper[4751]: I0227 16:27:53.530351 4751 patch_prober.go:28] interesting pod/router-default-5444994796-hsqjr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 27 16:27:53 crc kubenswrapper[4751]: [-]has-synced failed: reason withheld Feb 27 16:27:53 crc kubenswrapper[4751]: [+]process-running ok Feb 27 16:27:53 crc kubenswrapper[4751]: healthz check failed Feb 27 16:27:53 crc kubenswrapper[4751]: I0227 16:27:53.530484 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsqjr" podUID="51bdc38f-9b69-437f-9a17-edb963fb01ab" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 27 16:27:54 crc kubenswrapper[4751]: I0227 16:27:54.126220 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-5f8sr" Feb 27 16:27:54 crc kubenswrapper[4751]: I0227 16:27:54.465465 4751 patch_prober.go:28] interesting pod/console-f9d7485db-hb87p container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.12:8443/health\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Feb 27 16:27:54 crc kubenswrapper[4751]: I0227 16:27:54.465524 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-hb87p" podUID="ee39d7ed-b569-4c34-8c19-a5f386c85b5c" containerName="console" 
probeResult="failure" output="Get \"https://10.217.0.12:8443/health\": dial tcp 10.217.0.12:8443: connect: connection refused" Feb 27 16:27:54 crc kubenswrapper[4751]: I0227 16:27:54.528236 4751 patch_prober.go:28] interesting pod/router-default-5444994796-hsqjr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 27 16:27:54 crc kubenswrapper[4751]: [-]has-synced failed: reason withheld Feb 27 16:27:54 crc kubenswrapper[4751]: [+]process-running ok Feb 27 16:27:54 crc kubenswrapper[4751]: healthz check failed Feb 27 16:27:54 crc kubenswrapper[4751]: I0227 16:27:54.528284 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsqjr" podUID="51bdc38f-9b69-437f-9a17-edb963fb01ab" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 27 16:27:54 crc kubenswrapper[4751]: I0227 16:27:54.876746 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" Feb 27 16:27:54 crc kubenswrapper[4751]: I0227 16:27:54.882208 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" Feb 27 16:27:55 crc kubenswrapper[4751]: I0227 16:27:55.529369 4751 patch_prober.go:28] interesting pod/router-default-5444994796-hsqjr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 27 16:27:55 crc kubenswrapper[4751]: [-]has-synced failed: reason withheld Feb 27 16:27:55 crc kubenswrapper[4751]: [+]process-running ok Feb 27 16:27:55 crc kubenswrapper[4751]: healthz check failed Feb 27 16:27:55 crc kubenswrapper[4751]: I0227 16:27:55.529448 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsqjr" podUID="51bdc38f-9b69-437f-9a17-edb963fb01ab" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 27 16:27:56 crc kubenswrapper[4751]: I0227 16:27:56.529316 4751 patch_prober.go:28] interesting pod/router-default-5444994796-hsqjr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 27 16:27:56 crc kubenswrapper[4751]: [-]has-synced failed: reason withheld Feb 27 16:27:56 crc kubenswrapper[4751]: [+]process-running ok Feb 27 16:27:56 crc kubenswrapper[4751]: healthz check failed Feb 27 16:27:56 crc kubenswrapper[4751]: I0227 16:27:56.529455 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsqjr" podUID="51bdc38f-9b69-437f-9a17-edb963fb01ab" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 27 16:27:57 crc kubenswrapper[4751]: I0227 16:27:57.528982 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-hsqjr" Feb 27 16:27:57 crc kubenswrapper[4751]: I0227 16:27:57.531415 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-hsqjr" Feb 27 16:27:58 crc kubenswrapper[4751]: I0227 16:27:58.918757 4751 patch_prober.go:28] interesting 
pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 16:27:58 crc kubenswrapper[4751]: I0227 16:27:58.919771 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:27:59 crc kubenswrapper[4751]: I0227 16:27:59.522268 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-59668987f7-k9dmx"] Feb 27 16:27:59 crc kubenswrapper[4751]: I0227 16:27:59.522917 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" podUID="9dd7ada9-9067-4084-b1e2-145e8bca15a0" containerName="controller-manager" containerID="cri-o://dfdefc8245f85b78f779a7b9edbdabc9ca29e76a0ba55001d46d6a73ddf5974d" gracePeriod=30 Feb 27 16:27:59 crc kubenswrapper[4751]: I0227 16:27:59.559693 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn"] Feb 27 16:27:59 crc kubenswrapper[4751]: I0227 16:27:59.560008 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" podUID="1607c4d5-1595-4956-90aa-3e5231cb1725" containerName="route-controller-manager" containerID="cri-o://d22f49f1cb688ffb4147644985cf27c8bb29f53bb841161f0cf9c2a109f18be1" gracePeriod=30 Feb 27 16:28:00 crc kubenswrapper[4751]: I0227 16:28:00.131025 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536828-jvl6d"] Feb 27 16:28:00 crc kubenswrapper[4751]: E0227 16:28:00.131258 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1dd12d8-c2f3-44ea-97f2-aacff8b11867" containerName="pruner" Feb 27 16:28:00 crc kubenswrapper[4751]: I0227 16:28:00.131271 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1dd12d8-c2f3-44ea-97f2-aacff8b11867" containerName="pruner" Feb 27 16:28:00 crc kubenswrapper[4751]: I0227 16:28:00.131370 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1dd12d8-c2f3-44ea-97f2-aacff8b11867" containerName="pruner" Feb 27 16:28:00 crc kubenswrapper[4751]: I0227 16:28:00.131792 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536828-jvl6d" Feb 27 16:28:00 crc kubenswrapper[4751]: I0227 16:28:00.138608 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 16:28:00 crc kubenswrapper[4751]: I0227 16:28:00.142107 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536828-jvl6d"] Feb 27 16:28:00 crc kubenswrapper[4751]: I0227 16:28:00.294290 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dmf8\" (UniqueName: \"kubernetes.io/projected/9b8fa1bb-3fd3-4b7a-b94d-800ffcb15b2d-kube-api-access-6dmf8\") pod \"auto-csr-approver-29536828-jvl6d\" (UID: \"9b8fa1bb-3fd3-4b7a-b94d-800ffcb15b2d\") " pod="openshift-infra/auto-csr-approver-29536828-jvl6d" Feb 27 16:28:00 crc kubenswrapper[4751]: I0227 16:28:00.395641 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dmf8\" (UniqueName: \"kubernetes.io/projected/9b8fa1bb-3fd3-4b7a-b94d-800ffcb15b2d-kube-api-access-6dmf8\") pod \"auto-csr-approver-29536828-jvl6d\" (UID: \"9b8fa1bb-3fd3-4b7a-b94d-800ffcb15b2d\") " pod="openshift-infra/auto-csr-approver-29536828-jvl6d" Feb 27 16:28:00 crc kubenswrapper[4751]: I0227 16:28:00.426577 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dmf8\" (UniqueName: \"kubernetes.io/projected/9b8fa1bb-3fd3-4b7a-b94d-800ffcb15b2d-kube-api-access-6dmf8\") pod \"auto-csr-approver-29536828-jvl6d\" (UID: \"9b8fa1bb-3fd3-4b7a-b94d-800ffcb15b2d\") " pod="openshift-infra/auto-csr-approver-29536828-jvl6d" Feb 27 16:28:00 crc kubenswrapper[4751]: I0227 16:28:00.458586 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536828-jvl6d" Feb 27 16:28:01 crc kubenswrapper[4751]: I0227 16:28:01.365499 4751 generic.go:334] "Generic (PLEG): container finished" podID="9dd7ada9-9067-4084-b1e2-145e8bca15a0" containerID="dfdefc8245f85b78f779a7b9edbdabc9ca29e76a0ba55001d46d6a73ddf5974d" exitCode=0 Feb 27 16:28:01 crc kubenswrapper[4751]: I0227 16:28:01.365610 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" event={"ID":"9dd7ada9-9067-4084-b1e2-145e8bca15a0","Type":"ContainerDied","Data":"dfdefc8245f85b78f779a7b9edbdabc9ca29e76a0ba55001d46d6a73ddf5974d"} Feb 27 16:28:01 crc kubenswrapper[4751]: I0227 16:28:01.369217 4751 generic.go:334] "Generic (PLEG): container finished" podID="1607c4d5-1595-4956-90aa-3e5231cb1725" containerID="d22f49f1cb688ffb4147644985cf27c8bb29f53bb841161f0cf9c2a109f18be1" exitCode=0 Feb 27 16:28:01 crc kubenswrapper[4751]: I0227 16:28:01.369257 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" event={"ID":"1607c4d5-1595-4956-90aa-3e5231cb1725","Type":"ContainerDied","Data":"d22f49f1cb688ffb4147644985cf27c8bb29f53bb841161f0cf9c2a109f18be1"} Feb 27 16:28:01 crc kubenswrapper[4751]: I0227 16:28:01.978764 4751 patch_prober.go:28] interesting pod/controller-manager-59668987f7-k9dmx container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.49:8443/healthz\": dial tcp 10.217.0.49:8443: connect: connection refused" start-of-body= Feb 27 16:28:01 crc kubenswrapper[4751]: I0227 16:28:01.978823 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" podUID="9dd7ada9-9067-4084-b1e2-145e8bca15a0" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.49:8443/healthz\": dial tcp 10.217.0.49:8443: connect: connection refused" Feb 27 16:28:02 crc kubenswrapper[4751]: I0227 16:28:02.382245 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:28:04 crc kubenswrapper[4751]: I0227 16:28:04.146337 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 27 16:28:04 crc kubenswrapper[4751]: I0227 16:28:04.245444 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4f6e1fa9-8ad5-445a-bd26-e7767999c24f-kubelet-dir\") pod \"4f6e1fa9-8ad5-445a-bd26-e7767999c24f\" (UID: \"4f6e1fa9-8ad5-445a-bd26-e7767999c24f\") " Feb 27 16:28:04 crc kubenswrapper[4751]: I0227 16:28:04.245492 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4f6e1fa9-8ad5-445a-bd26-e7767999c24f-kube-api-access\") pod \"4f6e1fa9-8ad5-445a-bd26-e7767999c24f\" (UID: \"4f6e1fa9-8ad5-445a-bd26-e7767999c24f\") " Feb 27 16:28:04 crc kubenswrapper[4751]: I0227 16:28:04.245564 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4f6e1fa9-8ad5-445a-bd26-e7767999c24f-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "4f6e1fa9-8ad5-445a-bd26-e7767999c24f" (UID: "4f6e1fa9-8ad5-445a-bd26-e7767999c24f"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:28:04 crc kubenswrapper[4751]: I0227 16:28:04.245779 4751 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4f6e1fa9-8ad5-445a-bd26-e7767999c24f-kubelet-dir\") on node \"crc\" DevicePath \"\"" Feb 27 16:28:04 crc kubenswrapper[4751]: I0227 16:28:04.259036 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f6e1fa9-8ad5-445a-bd26-e7767999c24f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "4f6e1fa9-8ad5-445a-bd26-e7767999c24f" (UID: "4f6e1fa9-8ad5-445a-bd26-e7767999c24f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:28:04 crc kubenswrapper[4751]: I0227 16:28:04.346921 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4f6e1fa9-8ad5-445a-bd26-e7767999c24f-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 27 16:28:04 crc kubenswrapper[4751]: I0227 16:28:04.392304 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"4f6e1fa9-8ad5-445a-bd26-e7767999c24f","Type":"ContainerDied","Data":"0b1c212c1f5f43991b6e8c5be0489a734a2feeb7ce95a93d4cd7ba2bfbc74211"} Feb 27 16:28:04 crc kubenswrapper[4751]: I0227 16:28:04.392343 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0b1c212c1f5f43991b6e8c5be0489a734a2feeb7ce95a93d4cd7ba2bfbc74211" Feb 27 16:28:04 crc kubenswrapper[4751]: I0227 16:28:04.392344 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 27 16:28:04 crc kubenswrapper[4751]: I0227 16:28:04.649662 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:28:04 crc kubenswrapper[4751]: I0227 16:28:04.656439 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:28:05 crc kubenswrapper[4751]: I0227 16:28:05.878299 4751 patch_prober.go:28] interesting pod/route-controller-manager-78cb796998-crfmn container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.55:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Feb 27 16:28:05 crc kubenswrapper[4751]: I0227 16:28:05.878378 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" podUID="1607c4d5-1595-4956-90aa-3e5231cb1725" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.55:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Feb 27 16:28:06 crc kubenswrapper[4751]: E0227 16:28:06.375661 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:2086b7801d96d309e48e1c678789d95541de89bbae905e6f5a8de845927ca051: Get \"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:2086b7801d96d309e48e1c678789d95541de89bbae905e6f5a8de845927ca051\": context canceled" 
image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Feb 27 16:28:06 crc kubenswrapper[4751]: E0227 16:28:06.375933 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-w5vxn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-nf7jn_openshift-marketplace(b5d14efb-d682-4728-846f-3b379fe8d390): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:2086b7801d96d309e48e1c678789d95541de89bbae905e6f5a8de845927ca051: Get \"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:2086b7801d96d309e48e1c678789d95541de89bbae905e6f5a8de845927ca051\": context canceled" logger="UnhandledError" Feb 27 16:28:06 crc kubenswrapper[4751]: E0227 16:28:06.377985 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:2086b7801d96d309e48e1c678789d95541de89bbae905e6f5a8de845927ca051: Get \\\"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:2086b7801d96d309e48e1c678789d95541de89bbae905e6f5a8de845927ca051\\\": context canceled\"" pod="openshift-marketplace/redhat-operators-nf7jn" podUID="b5d14efb-d682-4728-846f-3b379fe8d390" Feb 27 16:28:06 crc kubenswrapper[4751]: E0227 16:28:06.846196 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-nf7jn" podUID="b5d14efb-d682-4728-846f-3b379fe8d390" Feb 27 16:28:10 crc kubenswrapper[4751]: I0227 16:28:10.595999 4751 ???:1] "http: TLS handshake error from 192.168.126.11:37946: no serving certificate available for the kubelet" Feb 27 16:28:11 crc kubenswrapper[4751]: E0227 16:28:11.887206 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying 
system image from manifest list: copying config: context canceled" image="registry.redhat.io/openshift4/ose-cli:latest" Feb 27 16:28:11 crc kubenswrapper[4751]: E0227 16:28:11.887386 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 16:28:11 crc kubenswrapper[4751]: container &Container{Name:oc,Image:registry.redhat.io/openshift4/ose-cli:latest,Command:[/bin/bash -c oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve Feb 27 16:28:11 crc kubenswrapper[4751]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-m7bgh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod auto-csr-approver-29536826-mxq7k_openshift-infra(11a526fe-64f1-4da8-a0e8-ed276ec069fb): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled Feb 27 16:28:11 crc kubenswrapper[4751]: > logger="UnhandledError" Feb 27 16:28:11 crc kubenswrapper[4751]: E0227 16:28:11.888702 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-infra/auto-csr-approver-29536826-mxq7k" podUID="11a526fe-64f1-4da8-a0e8-ed276ec069fb" Feb 27 16:28:12 crc kubenswrapper[4751]: E0227 16:28:12.442558 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536826-mxq7k" podUID="11a526fe-64f1-4da8-a0e8-ed276ec069fb" Feb 27 16:28:12 crc kubenswrapper[4751]: I0227 16:28:12.978570 4751 patch_prober.go:28] interesting pod/controller-manager-59668987f7-k9dmx container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.49:8443/healthz\": context deadline exceeded" start-of-body= Feb 27 16:28:12 crc kubenswrapper[4751]: I0227 16:28:12.978639 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" podUID="9dd7ada9-9067-4084-b1e2-145e8bca15a0" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.49:8443/healthz\": context deadline exceeded" Feb 27 16:28:14 crc kubenswrapper[4751]: I0227 16:28:14.942383 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h46g5" Feb 27 16:28:15 crc kubenswrapper[4751]: I0227 16:28:15.368388 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Feb 27 16:28:15 crc kubenswrapper[4751]: E0227 16:28:15.368674 4751 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="4f6e1fa9-8ad5-445a-bd26-e7767999c24f" containerName="pruner" Feb 27 16:28:15 crc kubenswrapper[4751]: I0227 16:28:15.368690 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f6e1fa9-8ad5-445a-bd26-e7767999c24f" containerName="pruner" Feb 27 16:28:15 crc kubenswrapper[4751]: I0227 16:28:15.368838 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f6e1fa9-8ad5-445a-bd26-e7767999c24f" containerName="pruner" Feb 27 16:28:15 crc kubenswrapper[4751]: I0227 16:28:15.369441 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 27 16:28:15 crc kubenswrapper[4751]: I0227 16:28:15.379013 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Feb 27 16:28:15 crc kubenswrapper[4751]: I0227 16:28:15.379369 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Feb 27 16:28:15 crc kubenswrapper[4751]: I0227 16:28:15.383862 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Feb 27 16:28:15 crc kubenswrapper[4751]: I0227 16:28:15.502086 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d1978137-fa2f-4172-af3f-bfb0a02c88b7-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"d1978137-fa2f-4172-af3f-bfb0a02c88b7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 27 16:28:15 crc kubenswrapper[4751]: I0227 16:28:15.502186 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d1978137-fa2f-4172-af3f-bfb0a02c88b7-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"d1978137-fa2f-4172-af3f-bfb0a02c88b7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 27 16:28:15 crc kubenswrapper[4751]: I0227 16:28:15.603727 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d1978137-fa2f-4172-af3f-bfb0a02c88b7-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"d1978137-fa2f-4172-af3f-bfb0a02c88b7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 27 16:28:15 crc kubenswrapper[4751]: I0227 16:28:15.603872 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d1978137-fa2f-4172-af3f-bfb0a02c88b7-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"d1978137-fa2f-4172-af3f-bfb0a02c88b7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 27 16:28:15 crc kubenswrapper[4751]: I0227 16:28:15.603892 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d1978137-fa2f-4172-af3f-bfb0a02c88b7-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"d1978137-fa2f-4172-af3f-bfb0a02c88b7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 27 16:28:15 crc kubenswrapper[4751]: I0227 16:28:15.636146 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d1978137-fa2f-4172-af3f-bfb0a02c88b7-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"d1978137-fa2f-4172-af3f-bfb0a02c88b7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 27 
16:28:15 crc kubenswrapper[4751]: I0227 16:28:15.696811 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 27 16:28:15 crc kubenswrapper[4751]: I0227 16:28:15.877797 4751 patch_prober.go:28] interesting pod/route-controller-manager-78cb796998-crfmn container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.55:8443/healthz\": dial tcp 10.217.0.55:8443: i/o timeout" start-of-body= Feb 27 16:28:15 crc kubenswrapper[4751]: I0227 16:28:15.878059 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" podUID="1607c4d5-1595-4956-90aa-3e5231cb1725" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.55:8443/healthz\": dial tcp 10.217.0.55:8443: i/o timeout" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.159227 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.163555 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.199874 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k"] Feb 27 16:28:16 crc kubenswrapper[4751]: E0227 16:28:16.200237 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1607c4d5-1595-4956-90aa-3e5231cb1725" containerName="route-controller-manager" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.200282 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="1607c4d5-1595-4956-90aa-3e5231cb1725" containerName="route-controller-manager" Feb 27 16:28:16 crc kubenswrapper[4751]: E0227 16:28:16.200296 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dd7ada9-9067-4084-b1e2-145e8bca15a0" containerName="controller-manager" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.200305 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dd7ada9-9067-4084-b1e2-145e8bca15a0" containerName="controller-manager" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.200443 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="9dd7ada9-9067-4084-b1e2-145e8bca15a0" containerName="controller-manager" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.200471 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="1607c4d5-1595-4956-90aa-3e5231cb1725" containerName="route-controller-manager" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.200957 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.208723 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k"] Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.317093 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9dd7ada9-9067-4084-b1e2-145e8bca15a0-proxy-ca-bundles\") pod \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\" (UID: \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\") " Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.317168 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mb7hx\" (UniqueName: \"kubernetes.io/projected/9dd7ada9-9067-4084-b1e2-145e8bca15a0-kube-api-access-mb7hx\") pod \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\" (UID: \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\") " Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.317194 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9dd7ada9-9067-4084-b1e2-145e8bca15a0-client-ca\") pod \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\" (UID: \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\") " Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.317213 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9dd7ada9-9067-4084-b1e2-145e8bca15a0-serving-cert\") pod \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\" (UID: \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\") " Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.317249 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dd7ada9-9067-4084-b1e2-145e8bca15a0-config\") pod \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\" (UID: \"9dd7ada9-9067-4084-b1e2-145e8bca15a0\") " Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.317865 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dd7ada9-9067-4084-b1e2-145e8bca15a0-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "9dd7ada9-9067-4084-b1e2-145e8bca15a0" (UID: "9dd7ada9-9067-4084-b1e2-145e8bca15a0"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.317908 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dd7ada9-9067-4084-b1e2-145e8bca15a0-client-ca" (OuterVolumeSpecName: "client-ca") pod "9dd7ada9-9067-4084-b1e2-145e8bca15a0" (UID: "9dd7ada9-9067-4084-b1e2-145e8bca15a0"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.318078 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1607c4d5-1595-4956-90aa-3e5231cb1725-serving-cert\") pod \"1607c4d5-1595-4956-90aa-3e5231cb1725\" (UID: \"1607c4d5-1595-4956-90aa-3e5231cb1725\") " Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.318104 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1607c4d5-1595-4956-90aa-3e5231cb1725-config\") pod \"1607c4d5-1595-4956-90aa-3e5231cb1725\" (UID: \"1607c4d5-1595-4956-90aa-3e5231cb1725\") " Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.318236 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dd7ada9-9067-4084-b1e2-145e8bca15a0-config" (OuterVolumeSpecName: "config") pod "9dd7ada9-9067-4084-b1e2-145e8bca15a0" (UID: "9dd7ada9-9067-4084-b1e2-145e8bca15a0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.318446 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wdxnq\" (UniqueName: \"kubernetes.io/projected/1607c4d5-1595-4956-90aa-3e5231cb1725-kube-api-access-wdxnq\") pod \"1607c4d5-1595-4956-90aa-3e5231cb1725\" (UID: \"1607c4d5-1595-4956-90aa-3e5231cb1725\") " Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.318984 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1607c4d5-1595-4956-90aa-3e5231cb1725-client-ca\") pod \"1607c4d5-1595-4956-90aa-3e5231cb1725\" (UID: \"1607c4d5-1595-4956-90aa-3e5231cb1725\") " Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.319204 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-client-ca\") pod \"route-controller-manager-675985f759-xvq8k\" (UID: \"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46\") " pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.319224 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1607c4d5-1595-4956-90aa-3e5231cb1725-config" (OuterVolumeSpecName: "config") pod "1607c4d5-1595-4956-90aa-3e5231cb1725" (UID: "1607c4d5-1595-4956-90aa-3e5231cb1725"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.319253 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhlg9\" (UniqueName: \"kubernetes.io/projected/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-kube-api-access-qhlg9\") pod \"route-controller-manager-675985f759-xvq8k\" (UID: \"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46\") " pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.319430 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-config\") pod \"route-controller-manager-675985f759-xvq8k\" (UID: \"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46\") " pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.319482 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-serving-cert\") pod \"route-controller-manager-675985f759-xvq8k\" (UID: \"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46\") " pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.319537 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1607c4d5-1595-4956-90aa-3e5231cb1725-client-ca" (OuterVolumeSpecName: "client-ca") pod "1607c4d5-1595-4956-90aa-3e5231cb1725" (UID: "1607c4d5-1595-4956-90aa-3e5231cb1725"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.319602 4751 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9dd7ada9-9067-4084-b1e2-145e8bca15a0-client-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.319619 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dd7ada9-9067-4084-b1e2-145e8bca15a0-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.319635 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1607c4d5-1595-4956-90aa-3e5231cb1725-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.319646 4751 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9dd7ada9-9067-4084-b1e2-145e8bca15a0-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.320313 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dd7ada9-9067-4084-b1e2-145e8bca15a0-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9dd7ada9-9067-4084-b1e2-145e8bca15a0" (UID: "9dd7ada9-9067-4084-b1e2-145e8bca15a0"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.322541 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1607c4d5-1595-4956-90aa-3e5231cb1725-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1607c4d5-1595-4956-90aa-3e5231cb1725" (UID: "1607c4d5-1595-4956-90aa-3e5231cb1725"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.322572 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1607c4d5-1595-4956-90aa-3e5231cb1725-kube-api-access-wdxnq" (OuterVolumeSpecName: "kube-api-access-wdxnq") pod "1607c4d5-1595-4956-90aa-3e5231cb1725" (UID: "1607c4d5-1595-4956-90aa-3e5231cb1725"). InnerVolumeSpecName "kube-api-access-wdxnq". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.336244 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9dd7ada9-9067-4084-b1e2-145e8bca15a0-kube-api-access-mb7hx" (OuterVolumeSpecName: "kube-api-access-mb7hx") pod "9dd7ada9-9067-4084-b1e2-145e8bca15a0" (UID: "9dd7ada9-9067-4084-b1e2-145e8bca15a0"). InnerVolumeSpecName "kube-api-access-mb7hx". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.421385 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-client-ca\") pod \"route-controller-manager-675985f759-xvq8k\" (UID: \"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46\") " pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.421507 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhlg9\" (UniqueName: \"kubernetes.io/projected/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-kube-api-access-qhlg9\") pod \"route-controller-manager-675985f759-xvq8k\" (UID: \"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46\") " pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.421557 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-config\") pod \"route-controller-manager-675985f759-xvq8k\" (UID: \"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46\") " pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.421598 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-serving-cert\") pod \"route-controller-manager-675985f759-xvq8k\" (UID: \"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46\") " pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.421648 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1607c4d5-1595-4956-90aa-3e5231cb1725-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.421665 4751 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-wdxnq\" (UniqueName: \"kubernetes.io/projected/1607c4d5-1595-4956-90aa-3e5231cb1725-kube-api-access-wdxnq\") on node \"crc\" DevicePath \"\"" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.421678 4751 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1607c4d5-1595-4956-90aa-3e5231cb1725-client-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.421691 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mb7hx\" (UniqueName: \"kubernetes.io/projected/9dd7ada9-9067-4084-b1e2-145e8bca15a0-kube-api-access-mb7hx\") on node \"crc\" DevicePath \"\"" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.421703 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9dd7ada9-9067-4084-b1e2-145e8bca15a0-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.422976 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-config\") pod \"route-controller-manager-675985f759-xvq8k\" (UID: \"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46\") " pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.423827 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-client-ca\") pod \"route-controller-manager-675985f759-xvq8k\" (UID: \"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46\") " pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.424484 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-serving-cert\") pod \"route-controller-manager-675985f759-xvq8k\" (UID: \"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46\") " pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.436426 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhlg9\" (UniqueName: \"kubernetes.io/projected/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-kube-api-access-qhlg9\") pod \"route-controller-manager-675985f759-xvq8k\" (UID: \"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46\") " pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.462150 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" event={"ID":"9dd7ada9-9067-4084-b1e2-145e8bca15a0","Type":"ContainerDied","Data":"a4acb5bbd4d189fe7945c05a3555a203ffdaae50895fbdf4850746cfddf92abb"} Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.462218 4751 scope.go:117] "RemoveContainer" containerID="dfdefc8245f85b78f779a7b9edbdabc9ca29e76a0ba55001d46d6a73ddf5974d" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.462176 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-59668987f7-k9dmx" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.465740 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" event={"ID":"1607c4d5-1595-4956-90aa-3e5231cb1725","Type":"ContainerDied","Data":"08bf5fa5554e14458467a4bfefd8e346729812e37bbd1af7a9bcc52acc3a32db"} Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.465823 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.498953 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-59668987f7-k9dmx"] Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.501971 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-59668987f7-k9dmx"] Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.509782 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn"] Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.513395 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-78cb796998-crfmn"] Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.518758 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.527507 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1607c4d5-1595-4956-90aa-3e5231cb1725" path="/var/lib/kubelet/pods/1607c4d5-1595-4956-90aa-3e5231cb1725/volumes" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.528114 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9dd7ada9-9067-4084-b1e2-145e8bca15a0" path="/var/lib/kubelet/pods/9dd7ada9-9067-4084-b1e2-145e8bca15a0/volumes" Feb 27 16:28:16 crc kubenswrapper[4751]: I0227 16:28:16.882279 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.535134 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6cccfd8b4-qh29v"] Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.536160 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.539518 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.540864 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.541083 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.542312 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.542584 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.542811 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.546604 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.556778 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6cccfd8b4-qh29v"] Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.603776 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k"] Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.672166 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-proxy-ca-bundles\") pod \"controller-manager-6cccfd8b4-qh29v\" (UID: \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\") " pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.672261 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-client-ca\") pod \"controller-manager-6cccfd8b4-qh29v\" (UID: \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\") " pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.672318 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-config\") pod \"controller-manager-6cccfd8b4-qh29v\" (UID: \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\") " pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.672452 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-serving-cert\") pod \"controller-manager-6cccfd8b4-qh29v\" (UID: \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\") " pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" Feb 27 16:28:19 crc kubenswrapper[4751]: 
I0227 16:28:19.672521 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6m8p\" (UniqueName: \"kubernetes.io/projected/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-kube-api-access-w6m8p\") pod \"controller-manager-6cccfd8b4-qh29v\" (UID: \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\") " pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.773375 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-proxy-ca-bundles\") pod \"controller-manager-6cccfd8b4-qh29v\" (UID: \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\") " pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.773449 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-client-ca\") pod \"controller-manager-6cccfd8b4-qh29v\" (UID: \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\") " pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.773479 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-config\") pod \"controller-manager-6cccfd8b4-qh29v\" (UID: \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\") " pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.773502 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-serving-cert\") pod \"controller-manager-6cccfd8b4-qh29v\" (UID: \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\") " pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.773523 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6m8p\" (UniqueName: \"kubernetes.io/projected/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-kube-api-access-w6m8p\") pod \"controller-manager-6cccfd8b4-qh29v\" (UID: \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\") " pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.774517 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-proxy-ca-bundles\") pod \"controller-manager-6cccfd8b4-qh29v\" (UID: \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\") " pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.774633 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-config\") pod \"controller-manager-6cccfd8b4-qh29v\" (UID: \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\") " pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.774973 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-client-ca\") pod 
\"controller-manager-6cccfd8b4-qh29v\" (UID: \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\") " pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.791652 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6m8p\" (UniqueName: \"kubernetes.io/projected/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-kube-api-access-w6m8p\") pod \"controller-manager-6cccfd8b4-qh29v\" (UID: \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\") " pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.793549 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-serving-cert\") pod \"controller-manager-6cccfd8b4-qh29v\" (UID: \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\") " pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" Feb 27 16:28:19 crc kubenswrapper[4751]: I0227 16:28:19.855139 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" Feb 27 16:28:20 crc kubenswrapper[4751]: I0227 16:28:20.402068 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Feb 27 16:28:20 crc kubenswrapper[4751]: I0227 16:28:20.406360 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Feb 27 16:28:20 crc kubenswrapper[4751]: I0227 16:28:20.406473 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Feb 27 16:28:20 crc kubenswrapper[4751]: I0227 16:28:20.598808 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fbbfa35f-11f5-4a0e-b65e-6ca317880932-kube-api-access\") pod \"installer-9-crc\" (UID: \"fbbfa35f-11f5-4a0e-b65e-6ca317880932\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 27 16:28:20 crc kubenswrapper[4751]: I0227 16:28:20.598880 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fbbfa35f-11f5-4a0e-b65e-6ca317880932-kubelet-dir\") pod \"installer-9-crc\" (UID: \"fbbfa35f-11f5-4a0e-b65e-6ca317880932\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 27 16:28:20 crc kubenswrapper[4751]: I0227 16:28:20.599054 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/fbbfa35f-11f5-4a0e-b65e-6ca317880932-var-lock\") pod \"installer-9-crc\" (UID: \"fbbfa35f-11f5-4a0e-b65e-6ca317880932\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 27 16:28:20 crc kubenswrapper[4751]: I0227 16:28:20.700079 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fbbfa35f-11f5-4a0e-b65e-6ca317880932-kube-api-access\") pod \"installer-9-crc\" (UID: \"fbbfa35f-11f5-4a0e-b65e-6ca317880932\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 27 16:28:20 crc kubenswrapper[4751]: I0227 16:28:20.700207 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fbbfa35f-11f5-4a0e-b65e-6ca317880932-kubelet-dir\") pod \"installer-9-crc\" (UID: 
\"fbbfa35f-11f5-4a0e-b65e-6ca317880932\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 27 16:28:20 crc kubenswrapper[4751]: I0227 16:28:20.700295 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/fbbfa35f-11f5-4a0e-b65e-6ca317880932-var-lock\") pod \"installer-9-crc\" (UID: \"fbbfa35f-11f5-4a0e-b65e-6ca317880932\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 27 16:28:20 crc kubenswrapper[4751]: I0227 16:28:20.700313 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fbbfa35f-11f5-4a0e-b65e-6ca317880932-kubelet-dir\") pod \"installer-9-crc\" (UID: \"fbbfa35f-11f5-4a0e-b65e-6ca317880932\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 27 16:28:20 crc kubenswrapper[4751]: I0227 16:28:20.700436 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/fbbfa35f-11f5-4a0e-b65e-6ca317880932-var-lock\") pod \"installer-9-crc\" (UID: \"fbbfa35f-11f5-4a0e-b65e-6ca317880932\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 27 16:28:20 crc kubenswrapper[4751]: I0227 16:28:20.720222 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fbbfa35f-11f5-4a0e-b65e-6ca317880932-kube-api-access\") pod \"installer-9-crc\" (UID: \"fbbfa35f-11f5-4a0e-b65e-6ca317880932\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 27 16:28:20 crc kubenswrapper[4751]: I0227 16:28:20.731366 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Feb 27 16:28:28 crc kubenswrapper[4751]: I0227 16:28:28.918525 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 16:28:28 crc kubenswrapper[4751]: I0227 16:28:28.918932 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:28:32 crc kubenswrapper[4751]: E0227 16:28:32.711351 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Feb 27 16:28:32 crc kubenswrapper[4751]: E0227 16:28:32.711745 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-v4xdf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-c9mg9_openshift-marketplace(ed03128a-80cd-404b-991d-99f04fdab36e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 27 16:28:32 crc kubenswrapper[4751]: E0227 16:28:32.712914 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-c9mg9" podUID="ed03128a-80cd-404b-991d-99f04fdab36e" Feb 27 16:28:34 crc kubenswrapper[4751]: E0227 16:28:34.294802 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-c9mg9" podUID="ed03128a-80cd-404b-991d-99f04fdab36e" Feb 27 16:28:34 crc kubenswrapper[4751]: E0227 16:28:34.438744 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Feb 27 16:28:34 crc kubenswrapper[4751]: E0227 16:28:34.438947 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-w7gnl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-z4j9x_openshift-marketplace(361c2acb-bff0-4874-b92e-56f883281f35): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 27 16:28:34 crc kubenswrapper[4751]: E0227 16:28:34.440552 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-z4j9x" podUID="361c2acb-bff0-4874-b92e-56f883281f35" Feb 27 16:28:34 crc kubenswrapper[4751]: E0227 16:28:34.473056 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Feb 27 16:28:34 crc kubenswrapper[4751]: E0227 16:28:34.473221 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-q4k6v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-ksw46_openshift-marketplace(1c35558f-cd8a-4a04-baca-ea445d76b712): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 27 16:28:34 crc kubenswrapper[4751]: E0227 16:28:34.474380 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-ksw46" podUID="1c35558f-cd8a-4a04-baca-ea445d76b712" Feb 27 16:28:35 crc kubenswrapper[4751]: E0227 16:28:35.683286 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-z4j9x" podUID="361c2acb-bff0-4874-b92e-56f883281f35" Feb 27 16:28:35 crc kubenswrapper[4751]: E0227 16:28:35.683745 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-ksw46" podUID="1c35558f-cd8a-4a04-baca-ea445d76b712" Feb 27 16:28:35 crc kubenswrapper[4751]: I0227 16:28:35.698729 4751 scope.go:117] "RemoveContainer" containerID="d22f49f1cb688ffb4147644985cf27c8bb29f53bb841161f0cf9c2a109f18be1" Feb 27 16:28:35 crc kubenswrapper[4751]: I0227 16:28:35.846078 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536828-jvl6d"] Feb 27 16:28:35 crc kubenswrapper[4751]: E0227 16:28:35.863686 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Feb 27 16:28:35 crc kubenswrapper[4751]: E0227 16:28:35.863853 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gkrts,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-k5lsc_openshift-marketplace(2d42be40-69b6-49a3-a4ad-ff74df0c284e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 27 16:28:35 crc kubenswrapper[4751]: E0227 16:28:35.865024 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-k5lsc" podUID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" Feb 27 16:28:36 crc kubenswrapper[4751]: E0227 16:28:36.107081 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Feb 27 16:28:36 crc kubenswrapper[4751]: E0227 16:28:36.107224 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5r66n,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-94pcv_openshift-marketplace(7412acf1-544d-4fbb-a538-2071988c8ae1): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 27 16:28:36 crc kubenswrapper[4751]: E0227 16:28:36.109045 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-94pcv" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" Feb 27 16:28:36 crc kubenswrapper[4751]: E0227 16:28:36.540664 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Feb 27 16:28:36 crc kubenswrapper[4751]: E0227 16:28:36.541189 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-26mdd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-n8xtl_openshift-marketplace(317aef2b-3749-4a30-afc6-96f40516eae7): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 27 16:28:36 crc kubenswrapper[4751]: E0227 16:28:36.542420 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-n8xtl" podUID="317aef2b-3749-4a30-afc6-96f40516eae7" Feb 27 16:28:39 crc kubenswrapper[4751]: E0227 16:28:39.442781 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-n8xtl" podUID="317aef2b-3749-4a30-afc6-96f40516eae7" Feb 27 16:28:39 crc kubenswrapper[4751]: E0227 16:28:39.443521 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-k5lsc" podUID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" Feb 27 16:28:39 crc kubenswrapper[4751]: E0227 16:28:39.443558 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-94pcv" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" Feb 27 16:28:39 crc kubenswrapper[4751]: E0227 16:28:39.556725 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Feb 27 16:28:39 crc kubenswrapper[4751]: E0227 16:28:39.556980 4751 kuberuntime_manager.go:1274] "Unhandled 
Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s4nql,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-r48bt_openshift-marketplace(cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 27 16:28:39 crc kubenswrapper[4751]: E0227 16:28:39.558990 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-r48bt" podUID="cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" Feb 27 16:28:39 crc kubenswrapper[4751]: I0227 16:28:39.625783 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536828-jvl6d" event={"ID":"9b8fa1bb-3fd3-4b7a-b94d-800ffcb15b2d","Type":"ContainerStarted","Data":"3641e2be059a0d1e280e2521361ff3f34455e57d5158cf0feb5c031fe75e9a6c"} Feb 27 16:28:39 crc kubenswrapper[4751]: E0227 16:28:39.629811 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-r48bt" podUID="cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" Feb 27 16:28:39 crc kubenswrapper[4751]: I0227 16:28:39.821380 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Feb 27 16:28:39 crc kubenswrapper[4751]: W0227 16:28:39.841162 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podd1978137_fa2f_4172_af3f_bfb0a02c88b7.slice/crio-8075c9e3ee78a81ac038fd2b1651f0b962b130b1d5cea18ff238eb290615e22b WatchSource:0}: Error finding container 8075c9e3ee78a81ac038fd2b1651f0b962b130b1d5cea18ff238eb290615e22b: Status 404 returned error can't find the container with id 
8075c9e3ee78a81ac038fd2b1651f0b962b130b1d5cea18ff238eb290615e22b Feb 27 16:28:40 crc kubenswrapper[4751]: I0227 16:28:40.092476 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6cccfd8b4-qh29v"] Feb 27 16:28:40 crc kubenswrapper[4751]: W0227 16:28:40.098066 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod20b7ca4c_52b1_4a15_a1d7_92daa5fca86a.slice/crio-207ac66cea29660d61679c0ceb474d732f4eedeff11b5641c1a1a22c680f983b WatchSource:0}: Error finding container 207ac66cea29660d61679c0ceb474d732f4eedeff11b5641c1a1a22c680f983b: Status 404 returned error can't find the container with id 207ac66cea29660d61679c0ceb474d732f4eedeff11b5641c1a1a22c680f983b Feb 27 16:28:40 crc kubenswrapper[4751]: I0227 16:28:40.143217 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k"] Feb 27 16:28:40 crc kubenswrapper[4751]: I0227 16:28:40.145836 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Feb 27 16:28:40 crc kubenswrapper[4751]: W0227 16:28:40.157955 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podfbbfa35f_11f5_4a0e_b65e_6ca317880932.slice/crio-6f16ab4fe0447ec52f398c5425136231fefadaa5cf229b2db160d45c9fd5a558 WatchSource:0}: Error finding container 6f16ab4fe0447ec52f398c5425136231fefadaa5cf229b2db160d45c9fd5a558: Status 404 returned error can't find the container with id 6f16ab4fe0447ec52f398c5425136231fefadaa5cf229b2db160d45c9fd5a558 Feb 27 16:28:40 crc kubenswrapper[4751]: I0227 16:28:40.321800 4751 csr.go:261] certificate signing request csr-84ct6 is approved, waiting to be issued Feb 27 16:28:40 crc kubenswrapper[4751]: I0227 16:28:40.328380 4751 csr.go:257] certificate signing request csr-84ct6 is issued Feb 27 16:28:40 crc kubenswrapper[4751]: I0227 16:28:40.632286 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"d1978137-fa2f-4172-af3f-bfb0a02c88b7","Type":"ContainerStarted","Data":"e2bfd132259f4bbbe637076a618121735f1b5bb339b7dc7ad549cd7761ce20c6"} Feb 27 16:28:40 crc kubenswrapper[4751]: I0227 16:28:40.632951 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"d1978137-fa2f-4172-af3f-bfb0a02c88b7","Type":"ContainerStarted","Data":"8075c9e3ee78a81ac038fd2b1651f0b962b130b1d5cea18ff238eb290615e22b"} Feb 27 16:28:40 crc kubenswrapper[4751]: I0227 16:28:40.637844 4751 generic.go:334] "Generic (PLEG): container finished" podID="11a526fe-64f1-4da8-a0e8-ed276ec069fb" containerID="12487aad43858aa1a0f16a992d3cafe9baa676f8aab31655ccbb907cb250a2e3" exitCode=0 Feb 27 16:28:40 crc kubenswrapper[4751]: I0227 16:28:40.637958 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536826-mxq7k" event={"ID":"11a526fe-64f1-4da8-a0e8-ed276ec069fb","Type":"ContainerDied","Data":"12487aad43858aa1a0f16a992d3cafe9baa676f8aab31655ccbb907cb250a2e3"} Feb 27 16:28:40 crc kubenswrapper[4751]: I0227 16:28:40.641794 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"fbbfa35f-11f5-4a0e-b65e-6ca317880932","Type":"ContainerStarted","Data":"02fd660ade808a149fd3838e849deb7eb3697a802bcff28080d5bfec9c4eb7e5"} Feb 27 16:28:40 crc kubenswrapper[4751]: I0227 16:28:40.641833 4751 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"fbbfa35f-11f5-4a0e-b65e-6ca317880932","Type":"ContainerStarted","Data":"6f16ab4fe0447ec52f398c5425136231fefadaa5cf229b2db160d45c9fd5a558"} Feb 27 16:28:40 crc kubenswrapper[4751]: I0227 16:28:40.643661 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" event={"ID":"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46","Type":"ContainerStarted","Data":"7aaa0f0a7617d33840bae99aef45f2089199f1e1f163a5d522b8b9ff5076e032"} Feb 27 16:28:40 crc kubenswrapper[4751]: I0227 16:28:40.643682 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" event={"ID":"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46","Type":"ContainerStarted","Data":"3fc4d1174f6e2bd551ffe599dfb6d062e571f73865230275f25dc87652e531a9"} Feb 27 16:28:40 crc kubenswrapper[4751]: I0227 16:28:40.643775 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" podUID="b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46" containerName="route-controller-manager" containerID="cri-o://7aaa0f0a7617d33840bae99aef45f2089199f1e1f163a5d522b8b9ff5076e032" gracePeriod=30 Feb 27 16:28:40 crc kubenswrapper[4751]: I0227 16:28:40.643884 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" Feb 27 16:28:40 crc kubenswrapper[4751]: I0227 16:28:40.652317 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=25.652296469 podStartE2EDuration="25.652296469s" podCreationTimestamp="2026-02-27 16:28:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:28:40.649771321 +0000 UTC m=+282.796785768" watchObservedRunningTime="2026-02-27 16:28:40.652296469 +0000 UTC m=+282.799310916" Feb 27 16:28:40 crc kubenswrapper[4751]: I0227 16:28:40.653203 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" event={"ID":"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a","Type":"ContainerStarted","Data":"7a7c9fb9ed91bf6ff7343981775f13feb9998e740f6763fdbe6d8c1bbb0f9d55"} Feb 27 16:28:40 crc kubenswrapper[4751]: I0227 16:28:40.653291 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" event={"ID":"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a","Type":"ContainerStarted","Data":"207ac66cea29660d61679c0ceb474d732f4eedeff11b5641c1a1a22c680f983b"} Feb 27 16:28:40 crc kubenswrapper[4751]: I0227 16:28:40.654458 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" Feb 27 16:28:40 crc kubenswrapper[4751]: I0227 16:28:40.667648 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" Feb 27 16:28:40 crc kubenswrapper[4751]: I0227 16:28:40.673986 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" podStartSLOduration=41.673965151 podStartE2EDuration="41.673965151s" 
podCreationTimestamp="2026-02-27 16:27:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:28:40.669435749 +0000 UTC m=+282.816450196" watchObservedRunningTime="2026-02-27 16:28:40.673965151 +0000 UTC m=+282.820979598" Feb 27 16:28:40 crc kubenswrapper[4751]: I0227 16:28:40.721446 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" podStartSLOduration=21.721430225 podStartE2EDuration="21.721430225s" podCreationTimestamp="2026-02-27 16:28:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:28:40.719677448 +0000 UTC m=+282.866691895" watchObservedRunningTime="2026-02-27 16:28:40.721430225 +0000 UTC m=+282.868444672" Feb 27 16:28:40 crc kubenswrapper[4751]: I0227 16:28:40.722051 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=20.722046771 podStartE2EDuration="20.722046771s" podCreationTimestamp="2026-02-27 16:28:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:28:40.70224691 +0000 UTC m=+282.849261347" watchObservedRunningTime="2026-02-27 16:28:40.722046771 +0000 UTC m=+282.869061218" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.025926 4751 patch_prober.go:28] interesting pod/route-controller-manager-675985f759-xvq8k container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": read tcp 10.217.0.2:48522->10.217.0.60:8443: read: connection reset by peer" start-of-body= Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.026363 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" podUID="b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": read tcp 10.217.0.2:48522->10.217.0.60:8443: read: connection reset by peer" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.330512 4751 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-02-24 05:54:36 +0000 UTC, rotation deadline is 2026-11-11 01:06:22.499239065 +0000 UTC Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.330948 4751 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 6152h37m41.168294467s for next certificate rotation Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.376939 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-route-controller-manager_route-controller-manager-675985f759-xvq8k_b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46/route-controller-manager/0.log" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.377016 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.403506 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq"] Feb 27 16:28:41 crc kubenswrapper[4751]: E0227 16:28:41.403810 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46" containerName="route-controller-manager" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.403825 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46" containerName="route-controller-manager" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.403964 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46" containerName="route-controller-manager" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.404426 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.411159 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq"] Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.500357 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-serving-cert\") pod \"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46\" (UID: \"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46\") " Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.500494 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qhlg9\" (UniqueName: \"kubernetes.io/projected/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-kube-api-access-qhlg9\") pod \"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46\" (UID: \"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46\") " Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.500775 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-client-ca\") pod \"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46\" (UID: \"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46\") " Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.500865 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-config\") pod \"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46\" (UID: \"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46\") " Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.500966 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83d8b2a4-6ad2-487f-8044-f6e009050edc-config\") pod \"route-controller-manager-7d77759655-j4kxq\" (UID: \"83d8b2a4-6ad2-487f-8044-f6e009050edc\") " pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.501031 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/83d8b2a4-6ad2-487f-8044-f6e009050edc-serving-cert\") pod \"route-controller-manager-7d77759655-j4kxq\" (UID: 
\"83d8b2a4-6ad2-487f-8044-f6e009050edc\") " pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.501082 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srdzs\" (UniqueName: \"kubernetes.io/projected/83d8b2a4-6ad2-487f-8044-f6e009050edc-kube-api-access-srdzs\") pod \"route-controller-manager-7d77759655-j4kxq\" (UID: \"83d8b2a4-6ad2-487f-8044-f6e009050edc\") " pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.501113 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/83d8b2a4-6ad2-487f-8044-f6e009050edc-client-ca\") pod \"route-controller-manager-7d77759655-j4kxq\" (UID: \"83d8b2a4-6ad2-487f-8044-f6e009050edc\") " pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.501669 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-client-ca" (OuterVolumeSpecName: "client-ca") pod "b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46" (UID: "b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.501964 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-config" (OuterVolumeSpecName: "config") pod "b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46" (UID: "b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.506453 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46" (UID: "b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.506548 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-kube-api-access-qhlg9" (OuterVolumeSpecName: "kube-api-access-qhlg9") pod "b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46" (UID: "b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46"). InnerVolumeSpecName "kube-api-access-qhlg9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.602203 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83d8b2a4-6ad2-487f-8044-f6e009050edc-config\") pod \"route-controller-manager-7d77759655-j4kxq\" (UID: \"83d8b2a4-6ad2-487f-8044-f6e009050edc\") " pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.602274 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/83d8b2a4-6ad2-487f-8044-f6e009050edc-serving-cert\") pod \"route-controller-manager-7d77759655-j4kxq\" (UID: \"83d8b2a4-6ad2-487f-8044-f6e009050edc\") " pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.602313 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srdzs\" (UniqueName: \"kubernetes.io/projected/83d8b2a4-6ad2-487f-8044-f6e009050edc-kube-api-access-srdzs\") pod \"route-controller-manager-7d77759655-j4kxq\" (UID: \"83d8b2a4-6ad2-487f-8044-f6e009050edc\") " pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.602336 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/83d8b2a4-6ad2-487f-8044-f6e009050edc-client-ca\") pod \"route-controller-manager-7d77759655-j4kxq\" (UID: \"83d8b2a4-6ad2-487f-8044-f6e009050edc\") " pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.602384 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.602393 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.602425 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qhlg9\" (UniqueName: \"kubernetes.io/projected/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-kube-api-access-qhlg9\") on node \"crc\" DevicePath \"\"" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.602434 4751 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46-client-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.603425 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/83d8b2a4-6ad2-487f-8044-f6e009050edc-client-ca\") pod \"route-controller-manager-7d77759655-j4kxq\" (UID: \"83d8b2a4-6ad2-487f-8044-f6e009050edc\") " pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.604567 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83d8b2a4-6ad2-487f-8044-f6e009050edc-config\") pod \"route-controller-manager-7d77759655-j4kxq\" 
(UID: \"83d8b2a4-6ad2-487f-8044-f6e009050edc\") " pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.610369 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/83d8b2a4-6ad2-487f-8044-f6e009050edc-serving-cert\") pod \"route-controller-manager-7d77759655-j4kxq\" (UID: \"83d8b2a4-6ad2-487f-8044-f6e009050edc\") " pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.626829 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srdzs\" (UniqueName: \"kubernetes.io/projected/83d8b2a4-6ad2-487f-8044-f6e009050edc-kube-api-access-srdzs\") pod \"route-controller-manager-7d77759655-j4kxq\" (UID: \"83d8b2a4-6ad2-487f-8044-f6e009050edc\") " pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.660677 4751 generic.go:334] "Generic (PLEG): container finished" podID="9b8fa1bb-3fd3-4b7a-b94d-800ffcb15b2d" containerID="0a159ba30f7fbe4dd02231bb3ba0d1e8d73a807b15e69daabdbd8f1e250283d8" exitCode=0 Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.660748 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536828-jvl6d" event={"ID":"9b8fa1bb-3fd3-4b7a-b94d-800ffcb15b2d","Type":"ContainerDied","Data":"0a159ba30f7fbe4dd02231bb3ba0d1e8d73a807b15e69daabdbd8f1e250283d8"} Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.661979 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-route-controller-manager_route-controller-manager-675985f759-xvq8k_b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46/route-controller-manager/0.log" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.662010 4751 generic.go:334] "Generic (PLEG): container finished" podID="b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46" containerID="7aaa0f0a7617d33840bae99aef45f2089199f1e1f163a5d522b8b9ff5076e032" exitCode=255 Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.662042 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" event={"ID":"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46","Type":"ContainerDied","Data":"7aaa0f0a7617d33840bae99aef45f2089199f1e1f163a5d522b8b9ff5076e032"} Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.662058 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" event={"ID":"b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46","Type":"ContainerDied","Data":"3fc4d1174f6e2bd551ffe599dfb6d062e571f73865230275f25dc87652e531a9"} Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.662073 4751 scope.go:117] "RemoveContainer" containerID="7aaa0f0a7617d33840bae99aef45f2089199f1e1f163a5d522b8b9ff5076e032" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.662164 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.664473 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nf7jn" event={"ID":"b5d14efb-d682-4728-846f-3b379fe8d390","Type":"ContainerStarted","Data":"ba5d9ab1542f717c7252ba837e6747b998e659f841cb508d0a8b9be099825522"} Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.665974 4751 generic.go:334] "Generic (PLEG): container finished" podID="d1978137-fa2f-4172-af3f-bfb0a02c88b7" containerID="e2bfd132259f4bbbe637076a618121735f1b5bb339b7dc7ad549cd7761ce20c6" exitCode=0 Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.666496 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"d1978137-fa2f-4172-af3f-bfb0a02c88b7","Type":"ContainerDied","Data":"e2bfd132259f4bbbe637076a618121735f1b5bb339b7dc7ad549cd7761ce20c6"} Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.701728 4751 scope.go:117] "RemoveContainer" containerID="7aaa0f0a7617d33840bae99aef45f2089199f1e1f163a5d522b8b9ff5076e032" Feb 27 16:28:41 crc kubenswrapper[4751]: E0227 16:28:41.702273 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7aaa0f0a7617d33840bae99aef45f2089199f1e1f163a5d522b8b9ff5076e032\": container with ID starting with 7aaa0f0a7617d33840bae99aef45f2089199f1e1f163a5d522b8b9ff5076e032 not found: ID does not exist" containerID="7aaa0f0a7617d33840bae99aef45f2089199f1e1f163a5d522b8b9ff5076e032" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.702325 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7aaa0f0a7617d33840bae99aef45f2089199f1e1f163a5d522b8b9ff5076e032"} err="failed to get container status \"7aaa0f0a7617d33840bae99aef45f2089199f1e1f163a5d522b8b9ff5076e032\": rpc error: code = NotFound desc = could not find container \"7aaa0f0a7617d33840bae99aef45f2089199f1e1f163a5d522b8b9ff5076e032\": container with ID starting with 7aaa0f0a7617d33840bae99aef45f2089199f1e1f163a5d522b8b9ff5076e032 not found: ID does not exist" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.731295 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k"] Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.737778 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-675985f759-xvq8k"] Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.738853 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.911823 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536826-mxq7k" Feb 27 16:28:41 crc kubenswrapper[4751]: I0227 16:28:41.968155 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq"] Feb 27 16:28:41 crc kubenswrapper[4751]: W0227 16:28:41.974326 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod83d8b2a4_6ad2_487f_8044_f6e009050edc.slice/crio-4120790dbc693a4f32bea9755f401030cddcd2f6ba6f06d8f7baab2e841ec88b WatchSource:0}: Error finding container 4120790dbc693a4f32bea9755f401030cddcd2f6ba6f06d8f7baab2e841ec88b: Status 404 returned error can't find the container with id 4120790dbc693a4f32bea9755f401030cddcd2f6ba6f06d8f7baab2e841ec88b Feb 27 16:28:42 crc kubenswrapper[4751]: I0227 16:28:42.109070 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m7bgh\" (UniqueName: \"kubernetes.io/projected/11a526fe-64f1-4da8-a0e8-ed276ec069fb-kube-api-access-m7bgh\") pod \"11a526fe-64f1-4da8-a0e8-ed276ec069fb\" (UID: \"11a526fe-64f1-4da8-a0e8-ed276ec069fb\") " Feb 27 16:28:42 crc kubenswrapper[4751]: I0227 16:28:42.116475 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11a526fe-64f1-4da8-a0e8-ed276ec069fb-kube-api-access-m7bgh" (OuterVolumeSpecName: "kube-api-access-m7bgh") pod "11a526fe-64f1-4da8-a0e8-ed276ec069fb" (UID: "11a526fe-64f1-4da8-a0e8-ed276ec069fb"). InnerVolumeSpecName "kube-api-access-m7bgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:28:42 crc kubenswrapper[4751]: I0227 16:28:42.210970 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m7bgh\" (UniqueName: \"kubernetes.io/projected/11a526fe-64f1-4da8-a0e8-ed276ec069fb-kube-api-access-m7bgh\") on node \"crc\" DevicePath \"\"" Feb 27 16:28:42 crc kubenswrapper[4751]: I0227 16:28:42.332117 4751 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-02-24 05:54:36 +0000 UTC, rotation deadline is 2026-11-10 12:47:13.707338158 +0000 UTC Feb 27 16:28:42 crc kubenswrapper[4751]: I0227 16:28:42.332171 4751 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 6140h18m31.375170081s for next certificate rotation Feb 27 16:28:42 crc kubenswrapper[4751]: I0227 16:28:42.529617 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46" path="/var/lib/kubelet/pods/b8ba25fc-6d0e-4c37-9910-1e8bf38b4e46/volumes" Feb 27 16:28:42 crc kubenswrapper[4751]: I0227 16:28:42.697888 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536826-mxq7k" event={"ID":"11a526fe-64f1-4da8-a0e8-ed276ec069fb","Type":"ContainerDied","Data":"0eb57bbe98032acdbd5c77d394759db5911f655ff6cf9dcc4de3c5d6ee154733"} Feb 27 16:28:42 crc kubenswrapper[4751]: I0227 16:28:42.698212 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0eb57bbe98032acdbd5c77d394759db5911f655ff6cf9dcc4de3c5d6ee154733" Feb 27 16:28:42 crc kubenswrapper[4751]: I0227 16:28:42.697992 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536826-mxq7k" Feb 27 16:28:42 crc kubenswrapper[4751]: I0227 16:28:42.701134 4751 generic.go:334] "Generic (PLEG): container finished" podID="b5d14efb-d682-4728-846f-3b379fe8d390" containerID="ba5d9ab1542f717c7252ba837e6747b998e659f841cb508d0a8b9be099825522" exitCode=0 Feb 27 16:28:42 crc kubenswrapper[4751]: I0227 16:28:42.701187 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nf7jn" event={"ID":"b5d14efb-d682-4728-846f-3b379fe8d390","Type":"ContainerDied","Data":"ba5d9ab1542f717c7252ba837e6747b998e659f841cb508d0a8b9be099825522"} Feb 27 16:28:42 crc kubenswrapper[4751]: I0227 16:28:42.708220 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" event={"ID":"83d8b2a4-6ad2-487f-8044-f6e009050edc","Type":"ContainerStarted","Data":"bac6c8105c744a581e3881e4f2d5bea4ce649ac6feb6da7ea0befd69a9ea4c32"} Feb 27 16:28:42 crc kubenswrapper[4751]: I0227 16:28:42.708269 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" event={"ID":"83d8b2a4-6ad2-487f-8044-f6e009050edc","Type":"ContainerStarted","Data":"4120790dbc693a4f32bea9755f401030cddcd2f6ba6f06d8f7baab2e841ec88b"} Feb 27 16:28:42 crc kubenswrapper[4751]: I0227 16:28:42.709455 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" Feb 27 16:28:42 crc kubenswrapper[4751]: I0227 16:28:42.740690 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" Feb 27 16:28:42 crc kubenswrapper[4751]: I0227 16:28:42.787592 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" podStartSLOduration=23.787576473 podStartE2EDuration="23.787576473s" podCreationTimestamp="2026-02-27 16:28:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:28:42.784230763 +0000 UTC m=+284.931245220" watchObservedRunningTime="2026-02-27 16:28:42.787576473 +0000 UTC m=+284.934590920" Feb 27 16:28:42 crc kubenswrapper[4751]: I0227 16:28:42.841331 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-dvwqp"] Feb 27 16:28:43 crc kubenswrapper[4751]: I0227 16:28:43.049827 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536828-jvl6d" Feb 27 16:28:43 crc kubenswrapper[4751]: I0227 16:28:43.195188 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6dmf8\" (UniqueName: \"kubernetes.io/projected/9b8fa1bb-3fd3-4b7a-b94d-800ffcb15b2d-kube-api-access-6dmf8\") pod \"9b8fa1bb-3fd3-4b7a-b94d-800ffcb15b2d\" (UID: \"9b8fa1bb-3fd3-4b7a-b94d-800ffcb15b2d\") " Feb 27 16:28:43 crc kubenswrapper[4751]: I0227 16:28:43.200649 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b8fa1bb-3fd3-4b7a-b94d-800ffcb15b2d-kube-api-access-6dmf8" (OuterVolumeSpecName: "kube-api-access-6dmf8") pod "9b8fa1bb-3fd3-4b7a-b94d-800ffcb15b2d" (UID: "9b8fa1bb-3fd3-4b7a-b94d-800ffcb15b2d"). 
InnerVolumeSpecName "kube-api-access-6dmf8". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:28:43 crc kubenswrapper[4751]: I0227 16:28:43.238802 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 27 16:28:43 crc kubenswrapper[4751]: I0227 16:28:43.297026 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6dmf8\" (UniqueName: \"kubernetes.io/projected/9b8fa1bb-3fd3-4b7a-b94d-800ffcb15b2d-kube-api-access-6dmf8\") on node \"crc\" DevicePath \"\"" Feb 27 16:28:43 crc kubenswrapper[4751]: I0227 16:28:43.397806 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d1978137-fa2f-4172-af3f-bfb0a02c88b7-kube-api-access\") pod \"d1978137-fa2f-4172-af3f-bfb0a02c88b7\" (UID: \"d1978137-fa2f-4172-af3f-bfb0a02c88b7\") " Feb 27 16:28:43 crc kubenswrapper[4751]: I0227 16:28:43.397861 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d1978137-fa2f-4172-af3f-bfb0a02c88b7-kubelet-dir\") pod \"d1978137-fa2f-4172-af3f-bfb0a02c88b7\" (UID: \"d1978137-fa2f-4172-af3f-bfb0a02c88b7\") " Feb 27 16:28:43 crc kubenswrapper[4751]: I0227 16:28:43.398093 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1978137-fa2f-4172-af3f-bfb0a02c88b7-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "d1978137-fa2f-4172-af3f-bfb0a02c88b7" (UID: "d1978137-fa2f-4172-af3f-bfb0a02c88b7"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:28:43 crc kubenswrapper[4751]: I0227 16:28:43.401583 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1978137-fa2f-4172-af3f-bfb0a02c88b7-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "d1978137-fa2f-4172-af3f-bfb0a02c88b7" (UID: "d1978137-fa2f-4172-af3f-bfb0a02c88b7"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:28:43 crc kubenswrapper[4751]: I0227 16:28:43.499660 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d1978137-fa2f-4172-af3f-bfb0a02c88b7-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 27 16:28:43 crc kubenswrapper[4751]: I0227 16:28:43.500113 4751 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d1978137-fa2f-4172-af3f-bfb0a02c88b7-kubelet-dir\") on node \"crc\" DevicePath \"\"" Feb 27 16:28:43 crc kubenswrapper[4751]: I0227 16:28:43.716209 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536828-jvl6d" event={"ID":"9b8fa1bb-3fd3-4b7a-b94d-800ffcb15b2d","Type":"ContainerDied","Data":"3641e2be059a0d1e280e2521361ff3f34455e57d5158cf0feb5c031fe75e9a6c"} Feb 27 16:28:43 crc kubenswrapper[4751]: I0227 16:28:43.716247 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3641e2be059a0d1e280e2521361ff3f34455e57d5158cf0feb5c031fe75e9a6c" Feb 27 16:28:43 crc kubenswrapper[4751]: I0227 16:28:43.716262 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536828-jvl6d" Feb 27 16:28:43 crc kubenswrapper[4751]: I0227 16:28:43.718084 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nf7jn" event={"ID":"b5d14efb-d682-4728-846f-3b379fe8d390","Type":"ContainerStarted","Data":"1e7899a3f5bfd584e5bfa26bb49c9c99a703418bcfee2db5bb88d01a294f8b12"} Feb 27 16:28:43 crc kubenswrapper[4751]: I0227 16:28:43.721601 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 27 16:28:43 crc kubenswrapper[4751]: I0227 16:28:43.721725 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"d1978137-fa2f-4172-af3f-bfb0a02c88b7","Type":"ContainerDied","Data":"8075c9e3ee78a81ac038fd2b1651f0b962b130b1d5cea18ff238eb290615e22b"} Feb 27 16:28:43 crc kubenswrapper[4751]: I0227 16:28:43.721767 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8075c9e3ee78a81ac038fd2b1651f0b962b130b1d5cea18ff238eb290615e22b" Feb 27 16:28:43 crc kubenswrapper[4751]: I0227 16:28:43.736060 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nf7jn" podStartSLOduration=2.829357639 podStartE2EDuration="1m0.736041305s" podCreationTimestamp="2026-02-27 16:27:43 +0000 UTC" firstStartedPulling="2026-02-27 16:27:45.229012123 +0000 UTC m=+227.376026570" lastFinishedPulling="2026-02-27 16:28:43.135695789 +0000 UTC m=+285.282710236" observedRunningTime="2026-02-27 16:28:43.733017564 +0000 UTC m=+285.880032011" watchObservedRunningTime="2026-02-27 16:28:43.736041305 +0000 UTC m=+285.883055752" Feb 27 16:28:44 crc kubenswrapper[4751]: I0227 16:28:44.227718 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nf7jn" Feb 27 16:28:44 crc kubenswrapper[4751]: I0227 16:28:44.227772 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nf7jn" Feb 27 16:28:45 crc kubenswrapper[4751]: I0227 16:28:45.478105 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-nf7jn" podUID="b5d14efb-d682-4728-846f-3b379fe8d390" containerName="registry-server" probeResult="failure" output=< Feb 27 16:28:45 crc kubenswrapper[4751]: timeout: failed to connect service ":50051" within 1s Feb 27 16:28:45 crc kubenswrapper[4751]: > Feb 27 16:28:49 crc kubenswrapper[4751]: I0227 16:28:49.750983 4751 generic.go:334] "Generic (PLEG): container finished" podID="361c2acb-bff0-4874-b92e-56f883281f35" containerID="c71c1b54691a48b68eda5ae7abc7249934e54e1d65a8976d3732c183be92e73a" exitCode=0 Feb 27 16:28:49 crc kubenswrapper[4751]: I0227 16:28:49.751065 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z4j9x" event={"ID":"361c2acb-bff0-4874-b92e-56f883281f35","Type":"ContainerDied","Data":"c71c1b54691a48b68eda5ae7abc7249934e54e1d65a8976d3732c183be92e73a"} Feb 27 16:28:49 crc kubenswrapper[4751]: I0227 16:28:49.755339 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c9mg9" event={"ID":"ed03128a-80cd-404b-991d-99f04fdab36e","Type":"ContainerStarted","Data":"4f6fe2d6d734857006a42debebd278dcce9dbb483777fec32a6280a0eeaded03"} Feb 27 16:28:50 crc kubenswrapper[4751]: I0227 16:28:50.765998 4751 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-marketplace/certified-operators-z4j9x" event={"ID":"361c2acb-bff0-4874-b92e-56f883281f35","Type":"ContainerStarted","Data":"7ed3e81e8fe8797ec694f47835e1420a10059bd36ffacecd59b68b460ce5eec1"} Feb 27 16:28:50 crc kubenswrapper[4751]: I0227 16:28:50.768881 4751 generic.go:334] "Generic (PLEG): container finished" podID="ed03128a-80cd-404b-991d-99f04fdab36e" containerID="4f6fe2d6d734857006a42debebd278dcce9dbb483777fec32a6280a0eeaded03" exitCode=0 Feb 27 16:28:50 crc kubenswrapper[4751]: I0227 16:28:50.768909 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c9mg9" event={"ID":"ed03128a-80cd-404b-991d-99f04fdab36e","Type":"ContainerDied","Data":"4f6fe2d6d734857006a42debebd278dcce9dbb483777fec32a6280a0eeaded03"} Feb 27 16:28:50 crc kubenswrapper[4751]: I0227 16:28:50.783452 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-z4j9x" podStartSLOduration=3.655581646 podStartE2EDuration="1m10.783387738s" podCreationTimestamp="2026-02-27 16:27:40 +0000 UTC" firstStartedPulling="2026-02-27 16:27:43.030487825 +0000 UTC m=+225.177502272" lastFinishedPulling="2026-02-27 16:28:50.158293917 +0000 UTC m=+292.305308364" observedRunningTime="2026-02-27 16:28:50.780930212 +0000 UTC m=+292.927944659" watchObservedRunningTime="2026-02-27 16:28:50.783387738 +0000 UTC m=+292.930402175" Feb 27 16:28:50 crc kubenswrapper[4751]: I0227 16:28:50.820563 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-z4j9x" Feb 27 16:28:50 crc kubenswrapper[4751]: I0227 16:28:50.820602 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-z4j9x" Feb 27 16:28:51 crc kubenswrapper[4751]: I0227 16:28:51.780845 4751 generic.go:334] "Generic (PLEG): container finished" podID="1c35558f-cd8a-4a04-baca-ea445d76b712" containerID="d447c09062902c914e9ac4fc2a58c1393094af8fb4c73c6a9426d45aa8ee066e" exitCode=0 Feb 27 16:28:51 crc kubenswrapper[4751]: I0227 16:28:51.780985 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksw46" event={"ID":"1c35558f-cd8a-4a04-baca-ea445d76b712","Type":"ContainerDied","Data":"d447c09062902c914e9ac4fc2a58c1393094af8fb4c73c6a9426d45aa8ee066e"} Feb 27 16:28:51 crc kubenswrapper[4751]: I0227 16:28:51.784955 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c9mg9" event={"ID":"ed03128a-80cd-404b-991d-99f04fdab36e","Type":"ContainerStarted","Data":"a80e11264f36459990061e60d0b622b87d004322d069cbab7fa233b86af562ac"} Feb 27 16:28:51 crc kubenswrapper[4751]: I0227 16:28:51.873343 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-z4j9x" podUID="361c2acb-bff0-4874-b92e-56f883281f35" containerName="registry-server" probeResult="failure" output=< Feb 27 16:28:51 crc kubenswrapper[4751]: timeout: failed to connect service ":50051" within 1s Feb 27 16:28:51 crc kubenswrapper[4751]: > Feb 27 16:28:51 crc kubenswrapper[4751]: I0227 16:28:51.874857 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-c9mg9" podStartSLOduration=3.671990508 podStartE2EDuration="1m11.874842519s" podCreationTimestamp="2026-02-27 16:27:40 +0000 UTC" firstStartedPulling="2026-02-27 16:27:43.014147657 +0000 UTC m=+225.161162104" 
lastFinishedPulling="2026-02-27 16:28:51.216999678 +0000 UTC m=+293.364014115" observedRunningTime="2026-02-27 16:28:51.873875363 +0000 UTC m=+294.020889810" watchObservedRunningTime="2026-02-27 16:28:51.874842519 +0000 UTC m=+294.021856966" Feb 27 16:28:54 crc kubenswrapper[4751]: I0227 16:28:54.317871 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nf7jn" Feb 27 16:28:54 crc kubenswrapper[4751]: I0227 16:28:54.357388 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nf7jn" Feb 27 16:28:54 crc kubenswrapper[4751]: I0227 16:28:54.978552 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nf7jn"] Feb 27 16:28:55 crc kubenswrapper[4751]: I0227 16:28:55.814105 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-nf7jn" podUID="b5d14efb-d682-4728-846f-3b379fe8d390" containerName="registry-server" containerID="cri-o://1e7899a3f5bfd584e5bfa26bb49c9c99a703418bcfee2db5bb88d01a294f8b12" gracePeriod=2 Feb 27 16:28:56 crc kubenswrapper[4751]: I0227 16:28:56.821154 4751 generic.go:334] "Generic (PLEG): container finished" podID="b5d14efb-d682-4728-846f-3b379fe8d390" containerID="1e7899a3f5bfd584e5bfa26bb49c9c99a703418bcfee2db5bb88d01a294f8b12" exitCode=0 Feb 27 16:28:56 crc kubenswrapper[4751]: I0227 16:28:56.821241 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nf7jn" event={"ID":"b5d14efb-d682-4728-846f-3b379fe8d390","Type":"ContainerDied","Data":"1e7899a3f5bfd584e5bfa26bb49c9c99a703418bcfee2db5bb88d01a294f8b12"} Feb 27 16:28:56 crc kubenswrapper[4751]: I0227 16:28:56.822879 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k5lsc" event={"ID":"2d42be40-69b6-49a3-a4ad-ff74df0c284e","Type":"ContainerStarted","Data":"b4a6fd7061983142d32f2451819c9fbd271ccdc11f4fba5b17692ce6551efc8f"} Feb 27 16:28:56 crc kubenswrapper[4751]: I0227 16:28:56.825350 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r48bt" event={"ID":"cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0","Type":"ContainerStarted","Data":"a356988ecfdd3ffab597de21eb5d1e582904600d03089de82cbd78e43f0851a0"} Feb 27 16:28:56 crc kubenswrapper[4751]: I0227 16:28:56.841825 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksw46" event={"ID":"1c35558f-cd8a-4a04-baca-ea445d76b712","Type":"ContainerStarted","Data":"1f171047e55a972a58277c75a89d5a5c01ed580dd49df219e0ddf72536ee15e2"} Feb 27 16:28:57 crc kubenswrapper[4751]: I0227 16:28:57.207328 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nf7jn" Feb 27 16:28:57 crc kubenswrapper[4751]: I0227 16:28:57.382042 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5d14efb-d682-4728-846f-3b379fe8d390-utilities\") pod \"b5d14efb-d682-4728-846f-3b379fe8d390\" (UID: \"b5d14efb-d682-4728-846f-3b379fe8d390\") " Feb 27 16:28:57 crc kubenswrapper[4751]: I0227 16:28:57.382131 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w5vxn\" (UniqueName: \"kubernetes.io/projected/b5d14efb-d682-4728-846f-3b379fe8d390-kube-api-access-w5vxn\") pod \"b5d14efb-d682-4728-846f-3b379fe8d390\" (UID: \"b5d14efb-d682-4728-846f-3b379fe8d390\") " Feb 27 16:28:57 crc kubenswrapper[4751]: I0227 16:28:57.382218 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5d14efb-d682-4728-846f-3b379fe8d390-catalog-content\") pod \"b5d14efb-d682-4728-846f-3b379fe8d390\" (UID: \"b5d14efb-d682-4728-846f-3b379fe8d390\") " Feb 27 16:28:57 crc kubenswrapper[4751]: I0227 16:28:57.382740 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5d14efb-d682-4728-846f-3b379fe8d390-utilities" (OuterVolumeSpecName: "utilities") pod "b5d14efb-d682-4728-846f-3b379fe8d390" (UID: "b5d14efb-d682-4728-846f-3b379fe8d390"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:28:57 crc kubenswrapper[4751]: I0227 16:28:57.384966 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5d14efb-d682-4728-846f-3b379fe8d390-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 16:28:57 crc kubenswrapper[4751]: I0227 16:28:57.396040 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5d14efb-d682-4728-846f-3b379fe8d390-kube-api-access-w5vxn" (OuterVolumeSpecName: "kube-api-access-w5vxn") pod "b5d14efb-d682-4728-846f-3b379fe8d390" (UID: "b5d14efb-d682-4728-846f-3b379fe8d390"). InnerVolumeSpecName "kube-api-access-w5vxn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:28:57 crc kubenswrapper[4751]: I0227 16:28:57.486525 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w5vxn\" (UniqueName: \"kubernetes.io/projected/b5d14efb-d682-4728-846f-3b379fe8d390-kube-api-access-w5vxn\") on node \"crc\" DevicePath \"\"" Feb 27 16:28:57 crc kubenswrapper[4751]: I0227 16:28:57.506591 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5d14efb-d682-4728-846f-3b379fe8d390-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b5d14efb-d682-4728-846f-3b379fe8d390" (UID: "b5d14efb-d682-4728-846f-3b379fe8d390"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:28:57 crc kubenswrapper[4751]: I0227 16:28:57.588212 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5d14efb-d682-4728-846f-3b379fe8d390-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 16:28:57 crc kubenswrapper[4751]: I0227 16:28:57.854498 4751 generic.go:334] "Generic (PLEG): container finished" podID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" containerID="b4a6fd7061983142d32f2451819c9fbd271ccdc11f4fba5b17692ce6551efc8f" exitCode=0 Feb 27 16:28:57 crc kubenswrapper[4751]: I0227 16:28:57.854584 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k5lsc" event={"ID":"2d42be40-69b6-49a3-a4ad-ff74df0c284e","Type":"ContainerDied","Data":"b4a6fd7061983142d32f2451819c9fbd271ccdc11f4fba5b17692ce6551efc8f"} Feb 27 16:28:57 crc kubenswrapper[4751]: I0227 16:28:57.858809 4751 generic.go:334] "Generic (PLEG): container finished" podID="cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" containerID="a356988ecfdd3ffab597de21eb5d1e582904600d03089de82cbd78e43f0851a0" exitCode=0 Feb 27 16:28:57 crc kubenswrapper[4751]: I0227 16:28:57.858872 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r48bt" event={"ID":"cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0","Type":"ContainerDied","Data":"a356988ecfdd3ffab597de21eb5d1e582904600d03089de82cbd78e43f0851a0"} Feb 27 16:28:57 crc kubenswrapper[4751]: I0227 16:28:57.861024 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nf7jn" event={"ID":"b5d14efb-d682-4728-846f-3b379fe8d390","Type":"ContainerDied","Data":"31a63462f83966616c04d1a59a8c549cd7eebd12e7c01513d90fcefec277e8fd"} Feb 27 16:28:57 crc kubenswrapper[4751]: I0227 16:28:57.861071 4751 scope.go:117] "RemoveContainer" containerID="1e7899a3f5bfd584e5bfa26bb49c9c99a703418bcfee2db5bb88d01a294f8b12" Feb 27 16:28:57 crc kubenswrapper[4751]: I0227 16:28:57.861091 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nf7jn" Feb 27 16:28:57 crc kubenswrapper[4751]: I0227 16:28:57.923813 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ksw46" podStartSLOduration=3.732154238 podStartE2EDuration="1m17.923798889s" podCreationTimestamp="2026-02-27 16:27:40 +0000 UTC" firstStartedPulling="2026-02-27 16:27:41.839542409 +0000 UTC m=+223.986556856" lastFinishedPulling="2026-02-27 16:28:56.03118706 +0000 UTC m=+298.178201507" observedRunningTime="2026-02-27 16:28:57.920887661 +0000 UTC m=+300.067902108" watchObservedRunningTime="2026-02-27 16:28:57.923798889 +0000 UTC m=+300.070813336" Feb 27 16:28:57 crc kubenswrapper[4751]: I0227 16:28:57.934130 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nf7jn"] Feb 27 16:28:57 crc kubenswrapper[4751]: I0227 16:28:57.937553 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-nf7jn"] Feb 27 16:28:58 crc kubenswrapper[4751]: I0227 16:28:58.484487 4751 scope.go:117] "RemoveContainer" containerID="ba5d9ab1542f717c7252ba837e6747b998e659f841cb508d0a8b9be099825522" Feb 27 16:28:58 crc kubenswrapper[4751]: I0227 16:28:58.549115 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5d14efb-d682-4728-846f-3b379fe8d390" path="/var/lib/kubelet/pods/b5d14efb-d682-4728-846f-3b379fe8d390/volumes" Feb 27 16:28:58 crc kubenswrapper[4751]: I0227 16:28:58.919128 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 16:28:58 crc kubenswrapper[4751]: I0227 16:28:58.920067 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:28:58 crc kubenswrapper[4751]: I0227 16:28:58.920192 4751 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 16:28:58 crc kubenswrapper[4751]: I0227 16:28:58.921373 4751 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154"} pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 27 16:28:58 crc kubenswrapper[4751]: I0227 16:28:58.921588 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" containerID="cri-o://4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154" gracePeriod=600 Feb 27 16:28:59 crc kubenswrapper[4751]: I0227 16:28:59.543200 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6cccfd8b4-qh29v"] Feb 27 16:28:59 crc kubenswrapper[4751]: I0227 16:28:59.543512 4751 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" podUID="20b7ca4c-52b1-4a15-a1d7-92daa5fca86a" containerName="controller-manager" containerID="cri-o://7a7c9fb9ed91bf6ff7343981775f13feb9998e740f6763fdbe6d8c1bbb0f9d55" gracePeriod=30 Feb 27 16:28:59 crc kubenswrapper[4751]: I0227 16:28:59.626050 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq"] Feb 27 16:28:59 crc kubenswrapper[4751]: I0227 16:28:59.626332 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" podUID="83d8b2a4-6ad2-487f-8044-f6e009050edc" containerName="route-controller-manager" containerID="cri-o://bac6c8105c744a581e3881e4f2d5bea4ce649ac6feb6da7ea0befd69a9ea4c32" gracePeriod=30 Feb 27 16:28:59 crc kubenswrapper[4751]: I0227 16:28:59.856502 4751 patch_prober.go:28] interesting pod/controller-manager-6cccfd8b4-qh29v container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 10.217.0.61:8443: connect: connection refused" start-of-body= Feb 27 16:28:59 crc kubenswrapper[4751]: I0227 16:28:59.856588 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" podUID="20b7ca4c-52b1-4a15-a1d7-92daa5fca86a" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 10.217.0.61:8443: connect: connection refused" Feb 27 16:28:59 crc kubenswrapper[4751]: I0227 16:28:59.879913 4751 generic.go:334] "Generic (PLEG): container finished" podID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerID="4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154" exitCode=0 Feb 27 16:28:59 crc kubenswrapper[4751]: I0227 16:28:59.879975 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerDied","Data":"4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154"} Feb 27 16:29:00 crc kubenswrapper[4751]: I0227 16:29:00.437746 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ksw46" Feb 27 16:29:00 crc kubenswrapper[4751]: I0227 16:29:00.437807 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ksw46" Feb 27 16:29:00 crc kubenswrapper[4751]: I0227 16:29:00.459769 4751 scope.go:117] "RemoveContainer" containerID="f58eac2c5701eabd37430918ddacbf31d945e58b18eda53efe05405b81a252eb" Feb 27 16:29:00 crc kubenswrapper[4751]: I0227 16:29:00.510467 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ksw46" Feb 27 16:29:00 crc kubenswrapper[4751]: I0227 16:29:00.889458 4751 generic.go:334] "Generic (PLEG): container finished" podID="20b7ca4c-52b1-4a15-a1d7-92daa5fca86a" containerID="7a7c9fb9ed91bf6ff7343981775f13feb9998e740f6763fdbe6d8c1bbb0f9d55" exitCode=0 Feb 27 16:29:00 crc kubenswrapper[4751]: I0227 16:29:00.889640 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-z4j9x" Feb 27 16:29:00 crc kubenswrapper[4751]: I0227 16:29:00.890111 4751 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" event={"ID":"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a","Type":"ContainerDied","Data":"7a7c9fb9ed91bf6ff7343981775f13feb9998e740f6763fdbe6d8c1bbb0f9d55"} Feb 27 16:29:00 crc kubenswrapper[4751]: I0227 16:29:00.894058 4751 generic.go:334] "Generic (PLEG): container finished" podID="83d8b2a4-6ad2-487f-8044-f6e009050edc" containerID="bac6c8105c744a581e3881e4f2d5bea4ce649ac6feb6da7ea0befd69a9ea4c32" exitCode=0 Feb 27 16:29:00 crc kubenswrapper[4751]: I0227 16:29:00.894119 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" event={"ID":"83d8b2a4-6ad2-487f-8044-f6e009050edc","Type":"ContainerDied","Data":"bac6c8105c744a581e3881e4f2d5bea4ce649ac6feb6da7ea0befd69a9ea4c32"} Feb 27 16:29:00 crc kubenswrapper[4751]: I0227 16:29:00.934896 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-z4j9x" Feb 27 16:29:01 crc kubenswrapper[4751]: I0227 16:29:01.071620 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-c9mg9" Feb 27 16:29:01 crc kubenswrapper[4751]: I0227 16:29:01.071676 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-c9mg9" Feb 27 16:29:01 crc kubenswrapper[4751]: I0227 16:29:01.137075 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-c9mg9" Feb 27 16:29:01 crc kubenswrapper[4751]: I0227 16:29:01.901609 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" event={"ID":"83d8b2a4-6ad2-487f-8044-f6e009050edc","Type":"ContainerDied","Data":"4120790dbc693a4f32bea9755f401030cddcd2f6ba6f06d8f7baab2e841ec88b"} Feb 27 16:29:01 crc kubenswrapper[4751]: I0227 16:29:01.902021 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4120790dbc693a4f32bea9755f401030cddcd2f6ba6f06d8f7baab2e841ec88b" Feb 27 16:29:01 crc kubenswrapper[4751]: I0227 16:29:01.935625 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" Feb 27 16:29:01 crc kubenswrapper[4751]: I0227 16:29:01.965994 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-c9mg9" Feb 27 16:29:01 crc kubenswrapper[4751]: I0227 16:29:01.967848 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb"] Feb 27 16:29:01 crc kubenswrapper[4751]: E0227 16:29:01.968106 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5d14efb-d682-4728-846f-3b379fe8d390" containerName="extract-content" Feb 27 16:29:01 crc kubenswrapper[4751]: I0227 16:29:01.968119 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5d14efb-d682-4728-846f-3b379fe8d390" containerName="extract-content" Feb 27 16:29:01 crc kubenswrapper[4751]: E0227 16:29:01.968134 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5d14efb-d682-4728-846f-3b379fe8d390" containerName="extract-utilities" Feb 27 16:29:01 crc kubenswrapper[4751]: I0227 16:29:01.968141 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5d14efb-d682-4728-846f-3b379fe8d390" containerName="extract-utilities" Feb 27 16:29:01 crc kubenswrapper[4751]: E0227 16:29:01.968158 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b8fa1bb-3fd3-4b7a-b94d-800ffcb15b2d" containerName="oc" Feb 27 16:29:01 crc kubenswrapper[4751]: I0227 16:29:01.968166 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b8fa1bb-3fd3-4b7a-b94d-800ffcb15b2d" containerName="oc" Feb 27 16:29:01 crc kubenswrapper[4751]: E0227 16:29:01.968180 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83d8b2a4-6ad2-487f-8044-f6e009050edc" containerName="route-controller-manager" Feb 27 16:29:01 crc kubenswrapper[4751]: I0227 16:29:01.968187 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="83d8b2a4-6ad2-487f-8044-f6e009050edc" containerName="route-controller-manager" Feb 27 16:29:01 crc kubenswrapper[4751]: E0227 16:29:01.968199 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5d14efb-d682-4728-846f-3b379fe8d390" containerName="registry-server" Feb 27 16:29:01 crc kubenswrapper[4751]: I0227 16:29:01.968206 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5d14efb-d682-4728-846f-3b379fe8d390" containerName="registry-server" Feb 27 16:29:01 crc kubenswrapper[4751]: E0227 16:29:01.968220 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11a526fe-64f1-4da8-a0e8-ed276ec069fb" containerName="oc" Feb 27 16:29:01 crc kubenswrapper[4751]: I0227 16:29:01.968226 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="11a526fe-64f1-4da8-a0e8-ed276ec069fb" containerName="oc" Feb 27 16:29:01 crc kubenswrapper[4751]: E0227 16:29:01.968236 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1978137-fa2f-4172-af3f-bfb0a02c88b7" containerName="pruner" Feb 27 16:29:01 crc kubenswrapper[4751]: I0227 16:29:01.968243 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1978137-fa2f-4172-af3f-bfb0a02c88b7" containerName="pruner" Feb 27 16:29:01 crc kubenswrapper[4751]: I0227 16:29:01.968363 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="83d8b2a4-6ad2-487f-8044-f6e009050edc" containerName="route-controller-manager" Feb 27 16:29:01 crc kubenswrapper[4751]: I0227 16:29:01.968380 4751 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="11a526fe-64f1-4da8-a0e8-ed276ec069fb" containerName="oc" Feb 27 16:29:01 crc kubenswrapper[4751]: I0227 16:29:01.968391 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b8fa1bb-3fd3-4b7a-b94d-800ffcb15b2d" containerName="oc" Feb 27 16:29:01 crc kubenswrapper[4751]: I0227 16:29:01.968423 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5d14efb-d682-4728-846f-3b379fe8d390" containerName="registry-server" Feb 27 16:29:01 crc kubenswrapper[4751]: I0227 16:29:01.968438 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1978137-fa2f-4172-af3f-bfb0a02c88b7" containerName="pruner" Feb 27 16:29:01 crc kubenswrapper[4751]: I0227 16:29:01.968861 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" Feb 27 16:29:01 crc kubenswrapper[4751]: I0227 16:29:01.980248 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb"] Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.061924 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/83d8b2a4-6ad2-487f-8044-f6e009050edc-client-ca\") pod \"83d8b2a4-6ad2-487f-8044-f6e009050edc\" (UID: \"83d8b2a4-6ad2-487f-8044-f6e009050edc\") " Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.062032 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/83d8b2a4-6ad2-487f-8044-f6e009050edc-serving-cert\") pod \"83d8b2a4-6ad2-487f-8044-f6e009050edc\" (UID: \"83d8b2a4-6ad2-487f-8044-f6e009050edc\") " Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.062060 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83d8b2a4-6ad2-487f-8044-f6e009050edc-config\") pod \"83d8b2a4-6ad2-487f-8044-f6e009050edc\" (UID: \"83d8b2a4-6ad2-487f-8044-f6e009050edc\") " Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.062098 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-srdzs\" (UniqueName: \"kubernetes.io/projected/83d8b2a4-6ad2-487f-8044-f6e009050edc-kube-api-access-srdzs\") pod \"83d8b2a4-6ad2-487f-8044-f6e009050edc\" (UID: \"83d8b2a4-6ad2-487f-8044-f6e009050edc\") " Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.062245 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b48f83f9-1144-4354-9150-e425bda781bc-config\") pod \"route-controller-manager-f94c788f6-95jxb\" (UID: \"b48f83f9-1144-4354-9150-e425bda781bc\") " pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.062292 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b48f83f9-1144-4354-9150-e425bda781bc-client-ca\") pod \"route-controller-manager-f94c788f6-95jxb\" (UID: \"b48f83f9-1144-4354-9150-e425bda781bc\") " pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.062348 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/b48f83f9-1144-4354-9150-e425bda781bc-serving-cert\") pod \"route-controller-manager-f94c788f6-95jxb\" (UID: \"b48f83f9-1144-4354-9150-e425bda781bc\") " pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.062522 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-grw5j\" (UniqueName: \"kubernetes.io/projected/b48f83f9-1144-4354-9150-e425bda781bc-kube-api-access-grw5j\") pod \"route-controller-manager-f94c788f6-95jxb\" (UID: \"b48f83f9-1144-4354-9150-e425bda781bc\") " pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.062924 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83d8b2a4-6ad2-487f-8044-f6e009050edc-client-ca" (OuterVolumeSpecName: "client-ca") pod "83d8b2a4-6ad2-487f-8044-f6e009050edc" (UID: "83d8b2a4-6ad2-487f-8044-f6e009050edc"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.062938 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83d8b2a4-6ad2-487f-8044-f6e009050edc-config" (OuterVolumeSpecName: "config") pod "83d8b2a4-6ad2-487f-8044-f6e009050edc" (UID: "83d8b2a4-6ad2-487f-8044-f6e009050edc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.067521 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83d8b2a4-6ad2-487f-8044-f6e009050edc-kube-api-access-srdzs" (OuterVolumeSpecName: "kube-api-access-srdzs") pod "83d8b2a4-6ad2-487f-8044-f6e009050edc" (UID: "83d8b2a4-6ad2-487f-8044-f6e009050edc"). InnerVolumeSpecName "kube-api-access-srdzs". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.068424 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83d8b2a4-6ad2-487f-8044-f6e009050edc-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "83d8b2a4-6ad2-487f-8044-f6e009050edc" (UID: "83d8b2a4-6ad2-487f-8044-f6e009050edc"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.164003 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-grw5j\" (UniqueName: \"kubernetes.io/projected/b48f83f9-1144-4354-9150-e425bda781bc-kube-api-access-grw5j\") pod \"route-controller-manager-f94c788f6-95jxb\" (UID: \"b48f83f9-1144-4354-9150-e425bda781bc\") " pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.164084 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b48f83f9-1144-4354-9150-e425bda781bc-config\") pod \"route-controller-manager-f94c788f6-95jxb\" (UID: \"b48f83f9-1144-4354-9150-e425bda781bc\") " pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.164121 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b48f83f9-1144-4354-9150-e425bda781bc-client-ca\") pod \"route-controller-manager-f94c788f6-95jxb\" (UID: \"b48f83f9-1144-4354-9150-e425bda781bc\") " pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.164151 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b48f83f9-1144-4354-9150-e425bda781bc-serving-cert\") pod \"route-controller-manager-f94c788f6-95jxb\" (UID: \"b48f83f9-1144-4354-9150-e425bda781bc\") " pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.164201 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/83d8b2a4-6ad2-487f-8044-f6e009050edc-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.164213 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83d8b2a4-6ad2-487f-8044-f6e009050edc-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.164326 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-srdzs\" (UniqueName: \"kubernetes.io/projected/83d8b2a4-6ad2-487f-8044-f6e009050edc-kube-api-access-srdzs\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.164528 4751 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/83d8b2a4-6ad2-487f-8044-f6e009050edc-client-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.165277 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b48f83f9-1144-4354-9150-e425bda781bc-client-ca\") pod \"route-controller-manager-f94c788f6-95jxb\" (UID: \"b48f83f9-1144-4354-9150-e425bda781bc\") " pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.167587 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b48f83f9-1144-4354-9150-e425bda781bc-config\") pod \"route-controller-manager-f94c788f6-95jxb\" (UID: 
\"b48f83f9-1144-4354-9150-e425bda781bc\") " pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.176131 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b48f83f9-1144-4354-9150-e425bda781bc-serving-cert\") pod \"route-controller-manager-f94c788f6-95jxb\" (UID: \"b48f83f9-1144-4354-9150-e425bda781bc\") " pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.180388 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-grw5j\" (UniqueName: \"kubernetes.io/projected/b48f83f9-1144-4354-9150-e425bda781bc-kube-api-access-grw5j\") pod \"route-controller-manager-f94c788f6-95jxb\" (UID: \"b48f83f9-1144-4354-9150-e425bda781bc\") " pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.287744 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.740477 4751 patch_prober.go:28] interesting pod/route-controller-manager-7d77759655-j4kxq container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.63:8443/healthz\": dial tcp 10.217.0.63:8443: i/o timeout (Client.Timeout exceeded while awaiting headers)" start-of-body= Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.740588 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" podUID="83d8b2a4-6ad2-487f-8044-f6e009050edc" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.63:8443/healthz\": dial tcp 10.217.0.63:8443: i/o timeout (Client.Timeout exceeded while awaiting headers)" Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.776793 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z4j9x"] Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.910082 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq" Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.911174 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-z4j9x" podUID="361c2acb-bff0-4874-b92e-56f883281f35" containerName="registry-server" containerID="cri-o://7ed3e81e8fe8797ec694f47835e1420a10059bd36ffacecd59b68b460ce5eec1" gracePeriod=2 Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.933318 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq"] Feb 27 16:29:02 crc kubenswrapper[4751]: I0227 16:29:02.937250 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d77759655-j4kxq"] Feb 27 16:29:03 crc kubenswrapper[4751]: I0227 16:29:03.130602 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" Feb 27 16:29:03 crc kubenswrapper[4751]: I0227 16:29:03.281507 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6m8p\" (UniqueName: \"kubernetes.io/projected/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-kube-api-access-w6m8p\") pod \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\" (UID: \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\") " Feb 27 16:29:03 crc kubenswrapper[4751]: I0227 16:29:03.282038 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-client-ca\") pod \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\" (UID: \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\") " Feb 27 16:29:03 crc kubenswrapper[4751]: I0227 16:29:03.282095 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-config\") pod \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\" (UID: \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\") " Feb 27 16:29:03 crc kubenswrapper[4751]: I0227 16:29:03.282126 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-serving-cert\") pod \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\" (UID: \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\") " Feb 27 16:29:03 crc kubenswrapper[4751]: I0227 16:29:03.282155 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-proxy-ca-bundles\") pod \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\" (UID: \"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a\") " Feb 27 16:29:03 crc kubenswrapper[4751]: I0227 16:29:03.283207 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "20b7ca4c-52b1-4a15-a1d7-92daa5fca86a" (UID: "20b7ca4c-52b1-4a15-a1d7-92daa5fca86a"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:29:03 crc kubenswrapper[4751]: I0227 16:29:03.283320 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-config" (OuterVolumeSpecName: "config") pod "20b7ca4c-52b1-4a15-a1d7-92daa5fca86a" (UID: "20b7ca4c-52b1-4a15-a1d7-92daa5fca86a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:29:03 crc kubenswrapper[4751]: I0227 16:29:03.283359 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-client-ca" (OuterVolumeSpecName: "client-ca") pod "20b7ca4c-52b1-4a15-a1d7-92daa5fca86a" (UID: "20b7ca4c-52b1-4a15-a1d7-92daa5fca86a"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:29:03 crc kubenswrapper[4751]: I0227 16:29:03.285072 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-kube-api-access-w6m8p" (OuterVolumeSpecName: "kube-api-access-w6m8p") pod "20b7ca4c-52b1-4a15-a1d7-92daa5fca86a" (UID: "20b7ca4c-52b1-4a15-a1d7-92daa5fca86a"). InnerVolumeSpecName "kube-api-access-w6m8p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:29:03 crc kubenswrapper[4751]: I0227 16:29:03.287116 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "20b7ca4c-52b1-4a15-a1d7-92daa5fca86a" (UID: "20b7ca4c-52b1-4a15-a1d7-92daa5fca86a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:29:03 crc kubenswrapper[4751]: I0227 16:29:03.384461 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6m8p\" (UniqueName: \"kubernetes.io/projected/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-kube-api-access-w6m8p\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:03 crc kubenswrapper[4751]: I0227 16:29:03.384515 4751 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-client-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:03 crc kubenswrapper[4751]: I0227 16:29:03.384535 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:03 crc kubenswrapper[4751]: I0227 16:29:03.384553 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:03 crc kubenswrapper[4751]: I0227 16:29:03.384570 4751 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:03 crc kubenswrapper[4751]: I0227 16:29:03.779176 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-c9mg9"] Feb 27 16:29:03 crc kubenswrapper[4751]: I0227 16:29:03.927637 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-c9mg9" podUID="ed03128a-80cd-404b-991d-99f04fdab36e" containerName="registry-server" containerID="cri-o://a80e11264f36459990061e60d0b622b87d004322d069cbab7fa233b86af562ac" gracePeriod=2 Feb 27 16:29:03 crc kubenswrapper[4751]: I0227 16:29:03.928148 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" Feb 27 16:29:03 crc kubenswrapper[4751]: I0227 16:29:03.928747 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6cccfd8b4-qh29v" event={"ID":"20b7ca4c-52b1-4a15-a1d7-92daa5fca86a","Type":"ContainerDied","Data":"207ac66cea29660d61679c0ceb474d732f4eedeff11b5641c1a1a22c680f983b"} Feb 27 16:29:03 crc kubenswrapper[4751]: I0227 16:29:03.928842 4751 scope.go:117] "RemoveContainer" containerID="7a7c9fb9ed91bf6ff7343981775f13feb9998e740f6763fdbe6d8c1bbb0f9d55" Feb 27 16:29:03 crc kubenswrapper[4751]: I0227 16:29:03.972689 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6cccfd8b4-qh29v"] Feb 27 16:29:03 crc kubenswrapper[4751]: I0227 16:29:03.993092 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-6cccfd8b4-qh29v"] Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.545163 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b7ca4c-52b1-4a15-a1d7-92daa5fca86a" path="/var/lib/kubelet/pods/20b7ca4c-52b1-4a15-a1d7-92daa5fca86a/volumes" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.546290 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83d8b2a4-6ad2-487f-8044-f6e009050edc" path="/var/lib/kubelet/pods/83d8b2a4-6ad2-487f-8044-f6e009050edc/volumes" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.622971 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck"] Feb 27 16:29:04 crc kubenswrapper[4751]: E0227 16:29:04.623372 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20b7ca4c-52b1-4a15-a1d7-92daa5fca86a" containerName="controller-manager" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.623397 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="20b7ca4c-52b1-4a15-a1d7-92daa5fca86a" containerName="controller-manager" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.623631 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="20b7ca4c-52b1-4a15-a1d7-92daa5fca86a" containerName="controller-manager" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.624263 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.627160 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.627754 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.628293 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.628583 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.629507 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.637180 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.638563 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.641513 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck"] Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.806024 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aaacc455-81c3-4f96-91ef-2b45ff0017d2-config\") pod \"controller-manager-6d9fcf5cbd-mzpck\" (UID: \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\") " pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.806119 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aaacc455-81c3-4f96-91ef-2b45ff0017d2-serving-cert\") pod \"controller-manager-6d9fcf5cbd-mzpck\" (UID: \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\") " pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.806211 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/aaacc455-81c3-4f96-91ef-2b45ff0017d2-proxy-ca-bundles\") pod \"controller-manager-6d9fcf5cbd-mzpck\" (UID: \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\") " pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.806259 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-trtdq\" (UniqueName: \"kubernetes.io/projected/aaacc455-81c3-4f96-91ef-2b45ff0017d2-kube-api-access-trtdq\") pod \"controller-manager-6d9fcf5cbd-mzpck\" (UID: \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\") " pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.806388 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/aaacc455-81c3-4f96-91ef-2b45ff0017d2-client-ca\") pod \"controller-manager-6d9fcf5cbd-mzpck\" (UID: \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\") " pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.907225 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aaacc455-81c3-4f96-91ef-2b45ff0017d2-config\") pod \"controller-manager-6d9fcf5cbd-mzpck\" (UID: \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\") " pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.907273 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aaacc455-81c3-4f96-91ef-2b45ff0017d2-serving-cert\") pod \"controller-manager-6d9fcf5cbd-mzpck\" (UID: \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\") " pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.907328 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/aaacc455-81c3-4f96-91ef-2b45ff0017d2-proxy-ca-bundles\") pod \"controller-manager-6d9fcf5cbd-mzpck\" (UID: \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\") " pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.907357 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-trtdq\" (UniqueName: \"kubernetes.io/projected/aaacc455-81c3-4f96-91ef-2b45ff0017d2-kube-api-access-trtdq\") pod \"controller-manager-6d9fcf5cbd-mzpck\" (UID: \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\") " pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.907413 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aaacc455-81c3-4f96-91ef-2b45ff0017d2-client-ca\") pod \"controller-manager-6d9fcf5cbd-mzpck\" (UID: \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\") " pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.909842 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aaacc455-81c3-4f96-91ef-2b45ff0017d2-client-ca\") pod \"controller-manager-6d9fcf5cbd-mzpck\" (UID: \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\") " pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.910599 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/aaacc455-81c3-4f96-91ef-2b45ff0017d2-proxy-ca-bundles\") pod \"controller-manager-6d9fcf5cbd-mzpck\" (UID: \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\") " pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.910859 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aaacc455-81c3-4f96-91ef-2b45ff0017d2-config\") pod \"controller-manager-6d9fcf5cbd-mzpck\" (UID: \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\") " 
pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.913327 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aaacc455-81c3-4f96-91ef-2b45ff0017d2-serving-cert\") pod \"controller-manager-6d9fcf5cbd-mzpck\" (UID: \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\") " pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.929513 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-trtdq\" (UniqueName: \"kubernetes.io/projected/aaacc455-81c3-4f96-91ef-2b45ff0017d2-kube-api-access-trtdq\") pod \"controller-manager-6d9fcf5cbd-mzpck\" (UID: \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\") " pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" Feb 27 16:29:04 crc kubenswrapper[4751]: I0227 16:29:04.944955 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" Feb 27 16:29:05 crc kubenswrapper[4751]: I0227 16:29:05.944844 4751 generic.go:334] "Generic (PLEG): container finished" podID="ed03128a-80cd-404b-991d-99f04fdab36e" containerID="a80e11264f36459990061e60d0b622b87d004322d069cbab7fa233b86af562ac" exitCode=0 Feb 27 16:29:05 crc kubenswrapper[4751]: I0227 16:29:05.944974 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c9mg9" event={"ID":"ed03128a-80cd-404b-991d-99f04fdab36e","Type":"ContainerDied","Data":"a80e11264f36459990061e60d0b622b87d004322d069cbab7fa233b86af562ac"} Feb 27 16:29:05 crc kubenswrapper[4751]: I0227 16:29:05.949051 4751 generic.go:334] "Generic (PLEG): container finished" podID="361c2acb-bff0-4874-b92e-56f883281f35" containerID="7ed3e81e8fe8797ec694f47835e1420a10059bd36ffacecd59b68b460ce5eec1" exitCode=0 Feb 27 16:29:05 crc kubenswrapper[4751]: I0227 16:29:05.949127 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z4j9x" event={"ID":"361c2acb-bff0-4874-b92e-56f883281f35","Type":"ContainerDied","Data":"7ed3e81e8fe8797ec694f47835e1420a10059bd36ffacecd59b68b460ce5eec1"} Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.142063 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-c9mg9" Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.242229 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed03128a-80cd-404b-991d-99f04fdab36e-utilities\") pod \"ed03128a-80cd-404b-991d-99f04fdab36e\" (UID: \"ed03128a-80cd-404b-991d-99f04fdab36e\") " Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.242352 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed03128a-80cd-404b-991d-99f04fdab36e-catalog-content\") pod \"ed03128a-80cd-404b-991d-99f04fdab36e\" (UID: \"ed03128a-80cd-404b-991d-99f04fdab36e\") " Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.242378 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4xdf\" (UniqueName: \"kubernetes.io/projected/ed03128a-80cd-404b-991d-99f04fdab36e-kube-api-access-v4xdf\") pod \"ed03128a-80cd-404b-991d-99f04fdab36e\" (UID: \"ed03128a-80cd-404b-991d-99f04fdab36e\") " Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.243983 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed03128a-80cd-404b-991d-99f04fdab36e-utilities" (OuterVolumeSpecName: "utilities") pod "ed03128a-80cd-404b-991d-99f04fdab36e" (UID: "ed03128a-80cd-404b-991d-99f04fdab36e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.252871 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed03128a-80cd-404b-991d-99f04fdab36e-kube-api-access-v4xdf" (OuterVolumeSpecName: "kube-api-access-v4xdf") pod "ed03128a-80cd-404b-991d-99f04fdab36e" (UID: "ed03128a-80cd-404b-991d-99f04fdab36e"). InnerVolumeSpecName "kube-api-access-v4xdf". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.257345 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z4j9x" Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.306229 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed03128a-80cd-404b-991d-99f04fdab36e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ed03128a-80cd-404b-991d-99f04fdab36e" (UID: "ed03128a-80cd-404b-991d-99f04fdab36e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.343242 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/361c2acb-bff0-4874-b92e-56f883281f35-utilities\") pod \"361c2acb-bff0-4874-b92e-56f883281f35\" (UID: \"361c2acb-bff0-4874-b92e-56f883281f35\") " Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.343483 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7gnl\" (UniqueName: \"kubernetes.io/projected/361c2acb-bff0-4874-b92e-56f883281f35-kube-api-access-w7gnl\") pod \"361c2acb-bff0-4874-b92e-56f883281f35\" (UID: \"361c2acb-bff0-4874-b92e-56f883281f35\") " Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.343553 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/361c2acb-bff0-4874-b92e-56f883281f35-catalog-content\") pod \"361c2acb-bff0-4874-b92e-56f883281f35\" (UID: \"361c2acb-bff0-4874-b92e-56f883281f35\") " Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.343819 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed03128a-80cd-404b-991d-99f04fdab36e-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.343842 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed03128a-80cd-404b-991d-99f04fdab36e-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.343853 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4xdf\" (UniqueName: \"kubernetes.io/projected/ed03128a-80cd-404b-991d-99f04fdab36e-kube-api-access-v4xdf\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.344113 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/361c2acb-bff0-4874-b92e-56f883281f35-utilities" (OuterVolumeSpecName: "utilities") pod "361c2acb-bff0-4874-b92e-56f883281f35" (UID: "361c2acb-bff0-4874-b92e-56f883281f35"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.346791 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/361c2acb-bff0-4874-b92e-56f883281f35-kube-api-access-w7gnl" (OuterVolumeSpecName: "kube-api-access-w7gnl") pod "361c2acb-bff0-4874-b92e-56f883281f35" (UID: "361c2acb-bff0-4874-b92e-56f883281f35"). InnerVolumeSpecName "kube-api-access-w7gnl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.416121 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/361c2acb-bff0-4874-b92e-56f883281f35-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "361c2acb-bff0-4874-b92e-56f883281f35" (UID: "361c2acb-bff0-4874-b92e-56f883281f35"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.445220 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7gnl\" (UniqueName: \"kubernetes.io/projected/361c2acb-bff0-4874-b92e-56f883281f35-kube-api-access-w7gnl\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.445296 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/361c2acb-bff0-4874-b92e-56f883281f35-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.445446 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/361c2acb-bff0-4874-b92e-56f883281f35-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.907343 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" podUID="d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3" containerName="oauth-openshift" containerID="cri-o://7951bf3551c34edcc37720b2a3862a129ac15eaad78f62464abbcc3807c4212e" gracePeriod=15 Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.970620 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c9mg9" event={"ID":"ed03128a-80cd-404b-991d-99f04fdab36e","Type":"ContainerDied","Data":"42150a2aebab6e59384ccbe9a8a0cff9801983fe556bb6bac3073c9de588394a"} Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.970698 4751 scope.go:117] "RemoveContainer" containerID="a80e11264f36459990061e60d0b622b87d004322d069cbab7fa233b86af562ac" Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.971106 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-c9mg9" Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.974979 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z4j9x" event={"ID":"361c2acb-bff0-4874-b92e-56f883281f35","Type":"ContainerDied","Data":"6220ce410a4bab54dde7045c8dbc5c0bd1e2dce7fe4c0ccdf4caf7e261d71faf"} Feb 27 16:29:07 crc kubenswrapper[4751]: I0227 16:29:07.975127 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-z4j9x" Feb 27 16:29:08 crc kubenswrapper[4751]: I0227 16:29:08.013632 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z4j9x"] Feb 27 16:29:08 crc kubenswrapper[4751]: I0227 16:29:08.015266 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-z4j9x"] Feb 27 16:29:08 crc kubenswrapper[4751]: I0227 16:29:08.031692 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-c9mg9"] Feb 27 16:29:08 crc kubenswrapper[4751]: I0227 16:29:08.035782 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-c9mg9"] Feb 27 16:29:08 crc kubenswrapper[4751]: I0227 16:29:08.534291 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="361c2acb-bff0-4874-b92e-56f883281f35" path="/var/lib/kubelet/pods/361c2acb-bff0-4874-b92e-56f883281f35/volumes" Feb 27 16:29:08 crc kubenswrapper[4751]: I0227 16:29:08.535881 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed03128a-80cd-404b-991d-99f04fdab36e" path="/var/lib/kubelet/pods/ed03128a-80cd-404b-991d-99f04fdab36e/volumes" Feb 27 16:29:08 crc kubenswrapper[4751]: I0227 16:29:08.983898 4751 generic.go:334] "Generic (PLEG): container finished" podID="d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3" containerID="7951bf3551c34edcc37720b2a3862a129ac15eaad78f62464abbcc3807c4212e" exitCode=0 Feb 27 16:29:08 crc kubenswrapper[4751]: I0227 16:29:08.984030 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" event={"ID":"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3","Type":"ContainerDied","Data":"7951bf3551c34edcc37720b2a3862a129ac15eaad78f62464abbcc3807c4212e"} Feb 27 16:29:10 crc kubenswrapper[4751]: I0227 16:29:10.496612 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ksw46" Feb 27 16:29:11 crc kubenswrapper[4751]: I0227 16:29:11.728154 4751 scope.go:117] "RemoveContainer" containerID="4f6fe2d6d734857006a42debebd278dcce9dbb483777fec32a6280a0eeaded03" Feb 27 16:29:11 crc kubenswrapper[4751]: I0227 16:29:11.839615 4751 scope.go:117] "RemoveContainer" containerID="455853a70d4b28a45fdd33d075329c0a36a935e54e42133306fd8bf0f8541d4b" Feb 27 16:29:11 crc kubenswrapper[4751]: I0227 16:29:11.909328 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:29:11 crc kubenswrapper[4751]: I0227 16:29:11.943378 4751 scope.go:117] "RemoveContainer" containerID="7ed3e81e8fe8797ec694f47835e1420a10059bd36ffacecd59b68b460ce5eec1" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.002553 4751 scope.go:117] "RemoveContainer" containerID="c71c1b54691a48b68eda5ae7abc7249934e54e1d65a8976d3732c183be92e73a" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.008167 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerStarted","Data":"0a576cb428bc8c6c7aa4cc0e1673e4d3aa049a82e2ef3e79581b883ffcfca488"} Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.012686 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-ocp-branding-template\") pod \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.012721 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-cliconfig\") pod \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.012749 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9cvth\" (UniqueName: \"kubernetes.io/projected/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-kube-api-access-9cvth\") pod \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.012767 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-session\") pod \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.012795 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-router-certs\") pod \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.012818 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-idp-0-file-data\") pod \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.012841 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-audit-policies\") pod \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.012865 4751 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-serving-cert\") pod \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.012893 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-audit-dir\") pod \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.012920 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-trusted-ca-bundle\") pod \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.012946 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-template-login\") pod \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.012966 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-template-error\") pod \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.012991 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-service-ca\") pod \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.013019 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-template-provider-selection\") pod \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\" (UID: \"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3\") " Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.014144 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3" (UID: "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.014692 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3" (UID: "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.015212 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3" (UID: "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.016052 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3" (UID: "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.019681 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3" (UID: "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.019993 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" event={"ID":"d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3","Type":"ContainerDied","Data":"7c38d54f6c21245fb0ecce63d398f4e9fdf3aef16d5b5b434202dbd5ad189327"} Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.020182 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-dvwqp" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.020302 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3" (UID: "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.024856 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3" (UID: "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.028677 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-kube-api-access-9cvth" (OuterVolumeSpecName: "kube-api-access-9cvth") pod "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3" (UID: "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3"). InnerVolumeSpecName "kube-api-access-9cvth". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.030899 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3" (UID: "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.035048 4751 scope.go:117] "RemoveContainer" containerID="7b408989dbd455d0eaee07b63ffeca0b23f11c6e80fdf8904637b2f3ec27ecf1" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.035623 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3" (UID: "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.037341 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3" (UID: "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.038232 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3" (UID: "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.039147 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3" (UID: "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.043979 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3" (UID: "d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.072701 4751 scope.go:117] "RemoveContainer" containerID="7951bf3551c34edcc37720b2a3862a129ac15eaad78f62464abbcc3807c4212e" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.087625 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck"] Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.114305 4751 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-audit-policies\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.114337 4751 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.114348 4751 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-audit-dir\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.114366 4751 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.114378 4751 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.114391 4751 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.114418 4751 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.114454 4751 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.114468 4751 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.114478 4751 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.114489 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9cvth\" (UniqueName: 
\"kubernetes.io/projected/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-kube-api-access-9cvth\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.114499 4751 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.114509 4751 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.114521 4751 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.185394 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb"] Feb 27 16:29:12 crc kubenswrapper[4751]: W0227 16:29:12.239974 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb48f83f9_1144_4354_9150_e425bda781bc.slice/crio-97641da05dad6914f2bdd81b0b9a11a08cbac7cff1e5a1429b07143e081a3198 WatchSource:0}: Error finding container 97641da05dad6914f2bdd81b0b9a11a08cbac7cff1e5a1429b07143e081a3198: Status 404 returned error can't find the container with id 97641da05dad6914f2bdd81b0b9a11a08cbac7cff1e5a1429b07143e081a3198 Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.348600 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-dvwqp"] Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.352207 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-dvwqp"] Feb 27 16:29:12 crc kubenswrapper[4751]: I0227 16:29:12.526970 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3" path="/var/lib/kubelet/pods/d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3/volumes" Feb 27 16:29:13 crc kubenswrapper[4751]: I0227 16:29:13.034915 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k5lsc" event={"ID":"2d42be40-69b6-49a3-a4ad-ff74df0c284e","Type":"ContainerStarted","Data":"e22e04a623eed6242f3189bafc9d4d61b2f9cd8403831504738cee145389ed67"} Feb 27 16:29:13 crc kubenswrapper[4751]: I0227 16:29:13.036507 4751 generic.go:334] "Generic (PLEG): container finished" podID="7412acf1-544d-4fbb-a538-2071988c8ae1" containerID="0ceac9ff4debb69a58c25fa3a347c8ed1cefe8d647e8b0a88066b001de4d57d1" exitCode=0 Feb 27 16:29:13 crc kubenswrapper[4751]: I0227 16:29:13.036571 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-94pcv" event={"ID":"7412acf1-544d-4fbb-a538-2071988c8ae1","Type":"ContainerDied","Data":"0ceac9ff4debb69a58c25fa3a347c8ed1cefe8d647e8b0a88066b001de4d57d1"} Feb 27 16:29:13 crc kubenswrapper[4751]: I0227 16:29:13.038465 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r48bt" 
event={"ID":"cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0","Type":"ContainerStarted","Data":"e2e37554a3c5799746ecbb9b307a55d39109492c3ae5401366da7cd7237fc649"} Feb 27 16:29:13 crc kubenswrapper[4751]: I0227 16:29:13.042093 4751 generic.go:334] "Generic (PLEG): container finished" podID="317aef2b-3749-4a30-afc6-96f40516eae7" containerID="15ddfd4810d51df8bfa730d66d27f9aca3da0d582099579a99938920675f90c8" exitCode=0 Feb 27 16:29:13 crc kubenswrapper[4751]: I0227 16:29:13.042147 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n8xtl" event={"ID":"317aef2b-3749-4a30-afc6-96f40516eae7","Type":"ContainerDied","Data":"15ddfd4810d51df8bfa730d66d27f9aca3da0d582099579a99938920675f90c8"} Feb 27 16:29:13 crc kubenswrapper[4751]: I0227 16:29:13.043318 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" event={"ID":"b48f83f9-1144-4354-9150-e425bda781bc","Type":"ContainerStarted","Data":"724468fedc84bce446b4f7075ccbe9a7751b7c3fcf0020eafb72eda9a3ecc0e9"} Feb 27 16:29:13 crc kubenswrapper[4751]: I0227 16:29:13.043349 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" event={"ID":"b48f83f9-1144-4354-9150-e425bda781bc","Type":"ContainerStarted","Data":"97641da05dad6914f2bdd81b0b9a11a08cbac7cff1e5a1429b07143e081a3198"} Feb 27 16:29:13 crc kubenswrapper[4751]: I0227 16:29:13.043427 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" Feb 27 16:29:13 crc kubenswrapper[4751]: I0227 16:29:13.045324 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" event={"ID":"aaacc455-81c3-4f96-91ef-2b45ff0017d2","Type":"ContainerStarted","Data":"0bcffef6b7f20c61f0e8878834860c9563a42870bf01e74e2d45ddb47c4f39d3"} Feb 27 16:29:13 crc kubenswrapper[4751]: I0227 16:29:13.045354 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" Feb 27 16:29:13 crc kubenswrapper[4751]: I0227 16:29:13.045363 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" event={"ID":"aaacc455-81c3-4f96-91ef-2b45ff0017d2","Type":"ContainerStarted","Data":"c83a36615d73d21ba0a84ebbae594e97ddd0d9019302ee0ef74a8a3e8f0d4237"} Feb 27 16:29:13 crc kubenswrapper[4751]: I0227 16:29:13.048474 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" Feb 27 16:29:13 crc kubenswrapper[4751]: I0227 16:29:13.049966 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" Feb 27 16:29:13 crc kubenswrapper[4751]: I0227 16:29:13.056471 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-k5lsc" podStartSLOduration=3.304535047 podStartE2EDuration="1m31.05645434s" podCreationTimestamp="2026-02-27 16:27:42 +0000 UTC" firstStartedPulling="2026-02-27 16:27:44.097388961 +0000 UTC m=+226.244403408" lastFinishedPulling="2026-02-27 16:29:11.849308254 +0000 UTC m=+313.996322701" observedRunningTime="2026-02-27 16:29:13.053964223 +0000 UTC m=+315.200978670" watchObservedRunningTime="2026-02-27 
16:29:13.05645434 +0000 UTC m=+315.203468787" Feb 27 16:29:13 crc kubenswrapper[4751]: I0227 16:29:13.073981 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" podStartSLOduration=14.07396666 podStartE2EDuration="14.07396666s" podCreationTimestamp="2026-02-27 16:28:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:29:13.071074112 +0000 UTC m=+315.218088549" watchObservedRunningTime="2026-02-27 16:29:13.07396666 +0000 UTC m=+315.220981107" Feb 27 16:29:13 crc kubenswrapper[4751]: I0227 16:29:13.091601 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" podStartSLOduration=14.091586003 podStartE2EDuration="14.091586003s" podCreationTimestamp="2026-02-27 16:28:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:29:13.089438965 +0000 UTC m=+315.236453412" watchObservedRunningTime="2026-02-27 16:29:13.091586003 +0000 UTC m=+315.238600450" Feb 27 16:29:13 crc kubenswrapper[4751]: I0227 16:29:13.148635 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-r48bt" podStartSLOduration=3.517904657 podStartE2EDuration="1m30.148596734s" podCreationTimestamp="2026-02-27 16:27:43 +0000 UTC" firstStartedPulling="2026-02-27 16:27:45.209999745 +0000 UTC m=+227.357014192" lastFinishedPulling="2026-02-27 16:29:11.840691782 +0000 UTC m=+313.987706269" observedRunningTime="2026-02-27 16:29:13.146624381 +0000 UTC m=+315.293638828" watchObservedRunningTime="2026-02-27 16:29:13.148596734 +0000 UTC m=+315.295630161" Feb 27 16:29:13 crc kubenswrapper[4751]: I0227 16:29:13.858150 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-r48bt" Feb 27 16:29:13 crc kubenswrapper[4751]: I0227 16:29:13.858592 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-r48bt" Feb 27 16:29:14 crc kubenswrapper[4751]: I0227 16:29:14.052563 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-94pcv" event={"ID":"7412acf1-544d-4fbb-a538-2071988c8ae1","Type":"ContainerStarted","Data":"70ef3570bab755daece0f426d5090821cc7ffcfd0b7dc19050930c97ec31c8fc"} Feb 27 16:29:14 crc kubenswrapper[4751]: I0227 16:29:14.055351 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n8xtl" event={"ID":"317aef2b-3749-4a30-afc6-96f40516eae7","Type":"ContainerStarted","Data":"491e99996e1d9d684128abd09f5e7ce6477de0245c550dd66ee1b301c746f5e9"} Feb 27 16:29:14 crc kubenswrapper[4751]: I0227 16:29:14.078858 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-94pcv" podStartSLOduration=2.524913009 podStartE2EDuration="1m34.078820606s" podCreationTimestamp="2026-02-27 16:27:40 +0000 UTC" firstStartedPulling="2026-02-27 16:27:41.916789424 +0000 UTC m=+224.063803871" lastFinishedPulling="2026-02-27 16:29:13.470697021 +0000 UTC m=+315.617711468" observedRunningTime="2026-02-27 16:29:14.073846053 +0000 UTC m=+316.220860510" watchObservedRunningTime="2026-02-27 16:29:14.078820606 +0000 UTC m=+316.225835063" Feb 27 16:29:14 crc 
kubenswrapper[4751]: I0227 16:29:14.095568 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-n8xtl" podStartSLOduration=1.611961934 podStartE2EDuration="1m32.095544755s" podCreationTimestamp="2026-02-27 16:27:42 +0000 UTC" firstStartedPulling="2026-02-27 16:27:43.047807299 +0000 UTC m=+225.194821746" lastFinishedPulling="2026-02-27 16:29:13.53139012 +0000 UTC m=+315.678404567" observedRunningTime="2026-02-27 16:29:14.094520138 +0000 UTC m=+316.241534585" watchObservedRunningTime="2026-02-27 16:29:14.095544755 +0000 UTC m=+316.242559202" Feb 27 16:29:14 crc kubenswrapper[4751]: I0227 16:29:14.900991 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-r48bt" podUID="cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" containerName="registry-server" probeResult="failure" output=< Feb 27 16:29:14 crc kubenswrapper[4751]: timeout: failed to connect service ":50051" within 1s Feb 27 16:29:14 crc kubenswrapper[4751]: > Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.113613 4751 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Feb 27 16:29:18 crc kubenswrapper[4751]: E0227 16:29:18.114061 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="361c2acb-bff0-4874-b92e-56f883281f35" containerName="extract-utilities" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.114077 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="361c2acb-bff0-4874-b92e-56f883281f35" containerName="extract-utilities" Feb 27 16:29:18 crc kubenswrapper[4751]: E0227 16:29:18.114095 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed03128a-80cd-404b-991d-99f04fdab36e" containerName="extract-utilities" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.114104 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed03128a-80cd-404b-991d-99f04fdab36e" containerName="extract-utilities" Feb 27 16:29:18 crc kubenswrapper[4751]: E0227 16:29:18.114117 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed03128a-80cd-404b-991d-99f04fdab36e" containerName="extract-content" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.114126 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed03128a-80cd-404b-991d-99f04fdab36e" containerName="extract-content" Feb 27 16:29:18 crc kubenswrapper[4751]: E0227 16:29:18.114139 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3" containerName="oauth-openshift" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.114147 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3" containerName="oauth-openshift" Feb 27 16:29:18 crc kubenswrapper[4751]: E0227 16:29:18.114160 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="361c2acb-bff0-4874-b92e-56f883281f35" containerName="extract-content" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.114170 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="361c2acb-bff0-4874-b92e-56f883281f35" containerName="extract-content" Feb 27 16:29:18 crc kubenswrapper[4751]: E0227 16:29:18.114183 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="361c2acb-bff0-4874-b92e-56f883281f35" containerName="registry-server" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.114191 4751 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="361c2acb-bff0-4874-b92e-56f883281f35" containerName="registry-server" Feb 27 16:29:18 crc kubenswrapper[4751]: E0227 16:29:18.114201 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed03128a-80cd-404b-991d-99f04fdab36e" containerName="registry-server" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.114210 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed03128a-80cd-404b-991d-99f04fdab36e" containerName="registry-server" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.114340 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8b87a6b-b1cf-4f0f-b46e-9a580676f9d3" containerName="oauth-openshift" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.114361 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="361c2acb-bff0-4874-b92e-56f883281f35" containerName="registry-server" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.114370 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed03128a-80cd-404b-991d-99f04fdab36e" containerName="registry-server" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.114768 4751 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.114954 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.115169 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129" gracePeriod=15 Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.115294 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542" gracePeriod=15 Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.115335 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f" gracePeriod=15 Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.115369 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560" gracePeriod=15 Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.115385 4751 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.115421 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb" gracePeriod=15 Feb 27 16:29:18 crc kubenswrapper[4751]: 
E0227 16:29:18.115557 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.115570 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 27 16:29:18 crc kubenswrapper[4751]: E0227 16:29:18.115580 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.115590 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 27 16:29:18 crc kubenswrapper[4751]: E0227 16:29:18.115605 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.115614 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Feb 27 16:29:18 crc kubenswrapper[4751]: E0227 16:29:18.115624 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.115634 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Feb 27 16:29:18 crc kubenswrapper[4751]: E0227 16:29:18.115644 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.115653 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Feb 27 16:29:18 crc kubenswrapper[4751]: E0227 16:29:18.115666 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.115675 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Feb 27 16:29:18 crc kubenswrapper[4751]: E0227 16:29:18.115685 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.115693 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 27 16:29:18 crc kubenswrapper[4751]: E0227 16:29:18.115705 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.115715 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.115827 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.115838 4751 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.115847 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.115858 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.115866 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.115875 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.115887 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.115900 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 27 16:29:18 crc kubenswrapper[4751]: E0227 16:29:18.116007 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.116017 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 27 16:29:18 crc kubenswrapper[4751]: E0227 16:29:18.116033 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.116043 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.116159 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.127662 4751 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.153968 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.300386 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.300981 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: 
\"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.301009 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.301034 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.301063 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.301157 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.301205 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.301351 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.402150 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.402206 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.402251 4751 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.402296 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.402312 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.402327 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.402317 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.402384 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.402381 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.402345 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.402455 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.402475 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: 
\"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.402555 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.402615 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.402615 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.402675 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.451700 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 27 16:29:18 crc kubenswrapper[4751]: W0227 16:29:18.480914 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-f2b0df662f56959a7051039932cbe83686ac7606d9e50e680455290f5f78b348 WatchSource:0}: Error finding container f2b0df662f56959a7051039932cbe83686ac7606d9e50e680455290f5f78b348: Status 404 returned error can't find the container with id f2b0df662f56959a7051039932cbe83686ac7606d9e50e680455290f5f78b348 Feb 27 16:29:18 crc kubenswrapper[4751]: E0227 16:29:18.487884 4751 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.64:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.18982762c1edc91c openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:29:18.484154652 +0000 UTC m=+320.631169089,LastTimestamp:2026-02-27 16:29:18.484154652 +0000 UTC m=+320.631169089,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:29:18 crc kubenswrapper[4751]: I0227 16:29:18.526971 4751 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:19 crc kubenswrapper[4751]: I0227 16:29:19.088906 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/3.log" Feb 27 16:29:19 crc kubenswrapper[4751]: I0227 16:29:19.090974 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Feb 27 16:29:19 crc kubenswrapper[4751]: I0227 16:29:19.092060 4751 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542" exitCode=0 Feb 27 16:29:19 crc kubenswrapper[4751]: I0227 16:29:19.092092 4751 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f" exitCode=0 Feb 27 16:29:19 crc kubenswrapper[4751]: I0227 16:29:19.092102 4751 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560" exitCode=0 Feb 27 16:29:19 crc kubenswrapper[4751]: I0227 16:29:19.092112 
4751 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb" exitCode=2 Feb 27 16:29:19 crc kubenswrapper[4751]: I0227 16:29:19.092200 4751 scope.go:117] "RemoveContainer" containerID="cc6e0cd173bcdbf69c0f6c2dd67903b9ddae0e4aeec7f4785aa83b18a64fc631" Feb 27 16:29:19 crc kubenswrapper[4751]: I0227 16:29:19.094131 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"207b107bb5115b9bfd5a6aea995cf996103c7b0ea08bb10ceb6357d38db54faa"} Feb 27 16:29:19 crc kubenswrapper[4751]: I0227 16:29:19.094166 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"f2b0df662f56959a7051039932cbe83686ac7606d9e50e680455290f5f78b348"} Feb 27 16:29:19 crc kubenswrapper[4751]: I0227 16:29:19.094913 4751 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:19 crc kubenswrapper[4751]: I0227 16:29:19.097879 4751 generic.go:334] "Generic (PLEG): container finished" podID="fbbfa35f-11f5-4a0e-b65e-6ca317880932" containerID="02fd660ade808a149fd3838e849deb7eb3697a802bcff28080d5bfec9c4eb7e5" exitCode=0 Feb 27 16:29:19 crc kubenswrapper[4751]: I0227 16:29:19.097897 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"fbbfa35f-11f5-4a0e-b65e-6ca317880932","Type":"ContainerDied","Data":"02fd660ade808a149fd3838e849deb7eb3697a802bcff28080d5bfec9c4eb7e5"} Feb 27 16:29:19 crc kubenswrapper[4751]: I0227 16:29:19.098783 4751 status_manager.go:851] "Failed to get status for pod" podUID="fbbfa35f-11f5-4a0e-b65e-6ca317880932" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:19 crc kubenswrapper[4751]: I0227 16:29:19.099209 4751 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.109638 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.561793 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.563229 4751 status_manager.go:851] "Failed to get status for pod" podUID="fbbfa35f-11f5-4a0e-b65e-6ca317880932" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.563860 4751 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.568644 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.569685 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.570487 4751 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.570770 4751 status_manager.go:851] "Failed to get status for pod" podUID="fbbfa35f-11f5-4a0e-b65e-6ca317880932" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.571140 4751 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.627383 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-94pcv" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.627523 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-94pcv" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.676113 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-94pcv" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.677024 4751 status_manager.go:851] "Failed to get status for pod" podUID="fbbfa35f-11f5-4a0e-b65e-6ca317880932" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.677689 4751 status_manager.go:851] "Failed to get 
status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.678297 4751 status_manager.go:851] "Failed to get status for pod" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" pod="openshift-marketplace/community-operators-94pcv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-94pcv\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.678884 4751 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.742579 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.742977 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/fbbfa35f-11f5-4a0e-b65e-6ca317880932-var-lock\") pod \"fbbfa35f-11f5-4a0e-b65e-6ca317880932\" (UID: \"fbbfa35f-11f5-4a0e-b65e-6ca317880932\") " Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.743011 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.743055 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fbbfa35f-11f5-4a0e-b65e-6ca317880932-kube-api-access\") pod \"fbbfa35f-11f5-4a0e-b65e-6ca317880932\" (UID: \"fbbfa35f-11f5-4a0e-b65e-6ca317880932\") " Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.743146 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fbbfa35f-11f5-4a0e-b65e-6ca317880932-kubelet-dir\") pod \"fbbfa35f-11f5-4a0e-b65e-6ca317880932\" (UID: \"fbbfa35f-11f5-4a0e-b65e-6ca317880932\") " Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.743204 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.742705 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.743568 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.743683 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.743690 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fbbfa35f-11f5-4a0e-b65e-6ca317880932-var-lock" (OuterVolumeSpecName: "var-lock") pod "fbbfa35f-11f5-4a0e-b65e-6ca317880932" (UID: "fbbfa35f-11f5-4a0e-b65e-6ca317880932"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.744513 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fbbfa35f-11f5-4a0e-b65e-6ca317880932-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "fbbfa35f-11f5-4a0e-b65e-6ca317880932" (UID: "fbbfa35f-11f5-4a0e-b65e-6ca317880932"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.752521 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fbbfa35f-11f5-4a0e-b65e-6ca317880932-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "fbbfa35f-11f5-4a0e-b65e-6ca317880932" (UID: "fbbfa35f-11f5-4a0e-b65e-6ca317880932"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.845243 4751 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/fbbfa35f-11f5-4a0e-b65e-6ca317880932-var-lock\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.845296 4751 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.845314 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fbbfa35f-11f5-4a0e-b65e-6ca317880932-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.845334 4751 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fbbfa35f-11f5-4a0e-b65e-6ca317880932-kubelet-dir\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.845351 4751 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:20 crc kubenswrapper[4751]: I0227 16:29:20.845368 4751 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.122834 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.123803 4751 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129" exitCode=0 Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.123877 4751 scope.go:117] "RemoveContainer" containerID="a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.123961 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.127788 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.127900 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"fbbfa35f-11f5-4a0e-b65e-6ca317880932","Type":"ContainerDied","Data":"6f16ab4fe0447ec52f398c5425136231fefadaa5cf229b2db160d45c9fd5a558"} Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.127945 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6f16ab4fe0447ec52f398c5425136231fefadaa5cf229b2db160d45c9fd5a558" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.143214 4751 status_manager.go:851] "Failed to get status for pod" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" pod="openshift-marketplace/community-operators-94pcv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-94pcv\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.143690 4751 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.144163 4751 status_manager.go:851] "Failed to get status for pod" podUID="fbbfa35f-11f5-4a0e-b65e-6ca317880932" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.144611 4751 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.151097 4751 scope.go:117] "RemoveContainer" containerID="c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.154384 4751 status_manager.go:851] "Failed to get status for pod" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" pod="openshift-marketplace/community-operators-94pcv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-94pcv\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.154846 4751 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.155391 4751 status_manager.go:851] "Failed to get status for pod" podUID="fbbfa35f-11f5-4a0e-b65e-6ca317880932" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:21 crc 
kubenswrapper[4751]: I0227 16:29:21.156437 4751 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.169751 4751 scope.go:117] "RemoveContainer" containerID="025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.172035 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-94pcv" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.172606 4751 status_manager.go:851] "Failed to get status for pod" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" pod="openshift-marketplace/community-operators-94pcv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-94pcv\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.173385 4751 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.173967 4751 status_manager.go:851] "Failed to get status for pod" podUID="fbbfa35f-11f5-4a0e-b65e-6ca317880932" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.174263 4751 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.191569 4751 scope.go:117] "RemoveContainer" containerID="09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.213057 4751 scope.go:117] "RemoveContainer" containerID="ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.235880 4751 scope.go:117] "RemoveContainer" containerID="7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.264782 4751 scope.go:117] "RemoveContainer" containerID="a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542" Feb 27 16:29:21 crc kubenswrapper[4751]: E0227 16:29:21.265168 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542\": container with ID starting with a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542 not found: ID does not exist" containerID="a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542" Feb 27 16:29:21 crc 
kubenswrapper[4751]: I0227 16:29:21.265220 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542"} err="failed to get container status \"a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542\": rpc error: code = NotFound desc = could not find container \"a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542\": container with ID starting with a9855ac4b5c63a4e7baec04ca08c3c48bf2ff66c343e1e418611008ed8e99542 not found: ID does not exist" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.265258 4751 scope.go:117] "RemoveContainer" containerID="c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f" Feb 27 16:29:21 crc kubenswrapper[4751]: E0227 16:29:21.265866 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\": container with ID starting with c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f not found: ID does not exist" containerID="c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.265889 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f"} err="failed to get container status \"c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\": rpc error: code = NotFound desc = could not find container \"c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f\": container with ID starting with c2bbf6c3ae480e34ee64f300e067787c1b2c61236675e7da09273f3b7110691f not found: ID does not exist" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.265906 4751 scope.go:117] "RemoveContainer" containerID="025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560" Feb 27 16:29:21 crc kubenswrapper[4751]: E0227 16:29:21.266312 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\": container with ID starting with 025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560 not found: ID does not exist" containerID="025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.266339 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560"} err="failed to get container status \"025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\": rpc error: code = NotFound desc = could not find container \"025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560\": container with ID starting with 025854e8e0ca2f6a5f98372dba18fffcb5cdf76dfbd303abd5514997c95aa560 not found: ID does not exist" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.266363 4751 scope.go:117] "RemoveContainer" containerID="09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb" Feb 27 16:29:21 crc kubenswrapper[4751]: E0227 16:29:21.267884 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\": container with ID starting with 
09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb not found: ID does not exist" containerID="09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.267951 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb"} err="failed to get container status \"09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\": rpc error: code = NotFound desc = could not find container \"09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb\": container with ID starting with 09690a6d83cd53f9ce8f1963a3bdb73709e0908af795125ecc11cf069a3300eb not found: ID does not exist" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.267973 4751 scope.go:117] "RemoveContainer" containerID="ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129" Feb 27 16:29:21 crc kubenswrapper[4751]: E0227 16:29:21.268562 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\": container with ID starting with ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129 not found: ID does not exist" containerID="ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.268597 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129"} err="failed to get container status \"ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\": rpc error: code = NotFound desc = could not find container \"ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129\": container with ID starting with ba7381a5191d9a38ada6992819d5065e9abeadc8b187b8b4573be86c9b57f129 not found: ID does not exist" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.268618 4751 scope.go:117] "RemoveContainer" containerID="7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd" Feb 27 16:29:21 crc kubenswrapper[4751]: E0227 16:29:21.268931 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\": container with ID starting with 7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd not found: ID does not exist" containerID="7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd" Feb 27 16:29:21 crc kubenswrapper[4751]: I0227 16:29:21.268966 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd"} err="failed to get container status \"7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\": rpc error: code = NotFound desc = could not find container \"7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd\": container with ID starting with 7feea78014dd0317b7e552cbe93f536597bd2f5b73772bd03f99a71d002773bd not found: ID does not exist" Feb 27 16:29:22 crc kubenswrapper[4751]: E0227 16:29:22.190737 4751 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.64:6443: connect: connection refused" 
event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.18982762c1edc91c openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:29:18.484154652 +0000 UTC m=+320.631169089,LastTimestamp:2026-02-27 16:29:18.484154652 +0000 UTC m=+320.631169089,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:29:22 crc kubenswrapper[4751]: I0227 16:29:22.420964 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-n8xtl" Feb 27 16:29:22 crc kubenswrapper[4751]: I0227 16:29:22.421020 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-n8xtl" Feb 27 16:29:22 crc kubenswrapper[4751]: I0227 16:29:22.479531 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-n8xtl" Feb 27 16:29:22 crc kubenswrapper[4751]: I0227 16:29:22.480178 4751 status_manager.go:851] "Failed to get status for pod" podUID="fbbfa35f-11f5-4a0e-b65e-6ca317880932" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:22 crc kubenswrapper[4751]: I0227 16:29:22.480568 4751 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:22 crc kubenswrapper[4751]: I0227 16:29:22.481031 4751 status_manager.go:851] "Failed to get status for pod" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" pod="openshift-marketplace/community-operators-94pcv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-94pcv\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:22 crc kubenswrapper[4751]: I0227 16:29:22.481434 4751 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:22 crc kubenswrapper[4751]: I0227 16:29:22.481778 4751 status_manager.go:851] "Failed to get status for pod" podUID="317aef2b-3749-4a30-afc6-96f40516eae7" pod="openshift-marketplace/redhat-marketplace-n8xtl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8xtl\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:22 crc kubenswrapper[4751]: I0227 16:29:22.528912 4751 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Feb 27 16:29:22 crc kubenswrapper[4751]: I0227 16:29:22.827147 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-k5lsc" Feb 27 16:29:22 crc kubenswrapper[4751]: I0227 16:29:22.827202 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-k5lsc" Feb 27 16:29:22 crc kubenswrapper[4751]: I0227 16:29:22.895349 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-k5lsc" Feb 27 16:29:22 crc kubenswrapper[4751]: I0227 16:29:22.895866 4751 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:22 crc kubenswrapper[4751]: I0227 16:29:22.896272 4751 status_manager.go:851] "Failed to get status for pod" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" pod="openshift-marketplace/community-operators-94pcv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-94pcv\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:22 crc kubenswrapper[4751]: I0227 16:29:22.896865 4751 status_manager.go:851] "Failed to get status for pod" podUID="317aef2b-3749-4a30-afc6-96f40516eae7" pod="openshift-marketplace/redhat-marketplace-n8xtl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8xtl\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:22 crc kubenswrapper[4751]: I0227 16:29:22.897118 4751 status_manager.go:851] "Failed to get status for pod" podUID="fbbfa35f-11f5-4a0e-b65e-6ca317880932" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:22 crc kubenswrapper[4751]: I0227 16:29:22.897489 4751 status_manager.go:851] "Failed to get status for pod" podUID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" pod="openshift-marketplace/redhat-marketplace-k5lsc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-k5lsc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.202036 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-k5lsc" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.202857 4751 status_manager.go:851] "Failed to get status for pod" podUID="fbbfa35f-11f5-4a0e-b65e-6ca317880932" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.203490 4751 status_manager.go:851] "Failed to get status for pod" podUID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" pod="openshift-marketplace/redhat-marketplace-k5lsc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-k5lsc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.203986 4751 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.204323 4751 status_manager.go:851] "Failed to get status for pod" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" pod="openshift-marketplace/community-operators-94pcv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-94pcv\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.204843 4751 status_manager.go:851] "Failed to get status for pod" podUID="317aef2b-3749-4a30-afc6-96f40516eae7" pod="openshift-marketplace/redhat-marketplace-n8xtl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8xtl\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.207221 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-n8xtl" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.207955 4751 status_manager.go:851] "Failed to get status for pod" podUID="317aef2b-3749-4a30-afc6-96f40516eae7" pod="openshift-marketplace/redhat-marketplace-n8xtl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8xtl\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.208537 4751 status_manager.go:851] "Failed to get status for pod" podUID="fbbfa35f-11f5-4a0e-b65e-6ca317880932" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.208883 4751 status_manager.go:851] "Failed to get status for pod" podUID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" pod="openshift-marketplace/redhat-marketplace-k5lsc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-k5lsc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.209368 4751 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.209824 4751 status_manager.go:851] "Failed to get status for pod" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" pod="openshift-marketplace/community-operators-94pcv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-94pcv\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 
27 16:29:23 crc kubenswrapper[4751]: E0227 16:29:23.707532 4751 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: E0227 16:29:23.708477 4751 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: E0227 16:29:23.708982 4751 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: E0227 16:29:23.709453 4751 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: E0227 16:29:23.709974 4751 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.710024 4751 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Feb 27 16:29:23 crc kubenswrapper[4751]: E0227 16:29:23.710427 4751 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.64:6443: connect: connection refused" interval="200ms" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.904180 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-r48bt" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.905731 4751 status_manager.go:851] "Failed to get status for pod" podUID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" pod="openshift-marketplace/redhat-marketplace-k5lsc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-k5lsc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.906216 4751 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.906647 4751 status_manager.go:851] "Failed to get status for pod" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" pod="openshift-marketplace/community-operators-94pcv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-94pcv\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.907078 4751 status_manager.go:851] "Failed to get status for pod" 
podUID="cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" pod="openshift-marketplace/redhat-operators-r48bt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-r48bt\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.907494 4751 status_manager.go:851] "Failed to get status for pod" podUID="317aef2b-3749-4a30-afc6-96f40516eae7" pod="openshift-marketplace/redhat-marketplace-n8xtl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8xtl\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.907848 4751 status_manager.go:851] "Failed to get status for pod" podUID="fbbfa35f-11f5-4a0e-b65e-6ca317880932" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: E0227 16:29:23.910854 4751 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.64:6443: connect: connection refused" interval="400ms" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.950254 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-r48bt" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.950879 4751 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.951602 4751 status_manager.go:851] "Failed to get status for pod" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" pod="openshift-marketplace/community-operators-94pcv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-94pcv\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.952054 4751 status_manager.go:851] "Failed to get status for pod" podUID="cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" pod="openshift-marketplace/redhat-operators-r48bt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-r48bt\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.952677 4751 status_manager.go:851] "Failed to get status for pod" podUID="317aef2b-3749-4a30-afc6-96f40516eae7" pod="openshift-marketplace/redhat-marketplace-n8xtl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8xtl\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.953127 4751 status_manager.go:851] "Failed to get status for pod" podUID="fbbfa35f-11f5-4a0e-b65e-6ca317880932" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 
16:29:23 crc kubenswrapper[4751]: I0227 16:29:23.953611 4751 status_manager.go:851] "Failed to get status for pod" podUID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" pod="openshift-marketplace/redhat-marketplace-k5lsc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-k5lsc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:24 crc kubenswrapper[4751]: E0227 16:29:24.312421 4751 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.64:6443: connect: connection refused" interval="800ms" Feb 27 16:29:25 crc kubenswrapper[4751]: E0227 16:29:25.113772 4751 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.64:6443: connect: connection refused" interval="1.6s" Feb 27 16:29:26 crc kubenswrapper[4751]: E0227 16:29:26.715494 4751 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.64:6443: connect: connection refused" interval="3.2s" Feb 27 16:29:28 crc kubenswrapper[4751]: I0227 16:29:28.525819 4751 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:28 crc kubenswrapper[4751]: I0227 16:29:28.526379 4751 status_manager.go:851] "Failed to get status for pod" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" pod="openshift-marketplace/community-operators-94pcv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-94pcv\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:28 crc kubenswrapper[4751]: I0227 16:29:28.526925 4751 status_manager.go:851] "Failed to get status for pod" podUID="cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" pod="openshift-marketplace/redhat-operators-r48bt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-r48bt\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:28 crc kubenswrapper[4751]: I0227 16:29:28.527338 4751 status_manager.go:851] "Failed to get status for pod" podUID="317aef2b-3749-4a30-afc6-96f40516eae7" pod="openshift-marketplace/redhat-marketplace-n8xtl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8xtl\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:28 crc kubenswrapper[4751]: I0227 16:29:28.527697 4751 status_manager.go:851] "Failed to get status for pod" podUID="fbbfa35f-11f5-4a0e-b65e-6ca317880932" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:28 crc kubenswrapper[4751]: I0227 16:29:28.528351 4751 status_manager.go:851] "Failed to get status for pod" podUID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" 
pod="openshift-marketplace/redhat-marketplace-k5lsc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-k5lsc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:29 crc kubenswrapper[4751]: E0227 16:29:29.917062 4751 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.64:6443: connect: connection refused" interval="6.4s" Feb 27 16:29:32 crc kubenswrapper[4751]: E0227 16:29:32.192659 4751 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.64:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.18982762c1edc91c openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-27 16:29:18.484154652 +0000 UTC m=+320.631169089,LastTimestamp:2026-02-27 16:29:18.484154652 +0000 UTC m=+320.631169089,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 27 16:29:33 crc kubenswrapper[4751]: I0227 16:29:33.216646 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/cluster-policy-controller/0.log" Feb 27 16:29:33 crc kubenswrapper[4751]: I0227 16:29:33.217766 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Feb 27 16:29:33 crc kubenswrapper[4751]: I0227 16:29:33.217821 4751 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="48bd9202bf8ec4c7579698f2e002923313c105d9109b5053f95ce32ae76c2821" exitCode=1 Feb 27 16:29:33 crc kubenswrapper[4751]: I0227 16:29:33.217855 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"48bd9202bf8ec4c7579698f2e002923313c105d9109b5053f95ce32ae76c2821"} Feb 27 16:29:33 crc kubenswrapper[4751]: I0227 16:29:33.218369 4751 scope.go:117] "RemoveContainer" containerID="48bd9202bf8ec4c7579698f2e002923313c105d9109b5053f95ce32ae76c2821" Feb 27 16:29:33 crc kubenswrapper[4751]: I0227 16:29:33.218745 4751 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:33 crc kubenswrapper[4751]: I0227 16:29:33.219413 4751 status_manager.go:851] 
"Failed to get status for pod" podUID="fbbfa35f-11f5-4a0e-b65e-6ca317880932" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:33 crc kubenswrapper[4751]: I0227 16:29:33.219845 4751 status_manager.go:851] "Failed to get status for pod" podUID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" pod="openshift-marketplace/redhat-marketplace-k5lsc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-k5lsc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:33 crc kubenswrapper[4751]: I0227 16:29:33.220115 4751 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:33 crc kubenswrapper[4751]: I0227 16:29:33.220528 4751 status_manager.go:851] "Failed to get status for pod" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" pod="openshift-marketplace/community-operators-94pcv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-94pcv\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:33 crc kubenswrapper[4751]: I0227 16:29:33.220912 4751 status_manager.go:851] "Failed to get status for pod" podUID="cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" pod="openshift-marketplace/redhat-operators-r48bt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-r48bt\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:33 crc kubenswrapper[4751]: I0227 16:29:33.221257 4751 status_manager.go:851] "Failed to get status for pod" podUID="317aef2b-3749-4a30-afc6-96f40516eae7" pod="openshift-marketplace/redhat-marketplace-n8xtl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8xtl\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:33 crc kubenswrapper[4751]: I0227 16:29:33.520457 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:29:33 crc kubenswrapper[4751]: I0227 16:29:33.521252 4751 status_manager.go:851] "Failed to get status for pod" podUID="317aef2b-3749-4a30-afc6-96f40516eae7" pod="openshift-marketplace/redhat-marketplace-n8xtl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8xtl\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:33 crc kubenswrapper[4751]: I0227 16:29:33.521645 4751 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:33 crc kubenswrapper[4751]: I0227 16:29:33.522084 4751 status_manager.go:851] "Failed to get status for pod" podUID="fbbfa35f-11f5-4a0e-b65e-6ca317880932" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:33 crc kubenswrapper[4751]: I0227 16:29:33.522289 4751 status_manager.go:851] "Failed to get status for pod" podUID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" pod="openshift-marketplace/redhat-marketplace-k5lsc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-k5lsc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:33 crc kubenswrapper[4751]: I0227 16:29:33.522488 4751 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:33 crc kubenswrapper[4751]: I0227 16:29:33.522652 4751 status_manager.go:851] "Failed to get status for pod" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" pod="openshift-marketplace/community-operators-94pcv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-94pcv\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:33 crc kubenswrapper[4751]: I0227 16:29:33.522825 4751 status_manager.go:851] "Failed to get status for pod" podUID="cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" pod="openshift-marketplace/redhat-operators-r48bt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-r48bt\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:33 crc kubenswrapper[4751]: I0227 16:29:33.542328 4751 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="66626c61-3f6b-48d2-92e6-a061f0c0a2bb" Feb 27 16:29:33 crc kubenswrapper[4751]: I0227 16:29:33.542658 4751 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="66626c61-3f6b-48d2-92e6-a061f0c0a2bb" Feb 27 16:29:33 crc kubenswrapper[4751]: E0227 16:29:33.543161 4751 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.64:6443: 
connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:29:33 crc kubenswrapper[4751]: I0227 16:29:33.543629 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:29:33 crc kubenswrapper[4751]: W0227 16:29:33.567898 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-4d03140b7769d9bafc9f235b37695dd68a0db6fdc8478b740077d77159d1108f WatchSource:0}: Error finding container 4d03140b7769d9bafc9f235b37695dd68a0db6fdc8478b740077d77159d1108f: Status 404 returned error can't find the container with id 4d03140b7769d9bafc9f235b37695dd68a0db6fdc8478b740077d77159d1108f Feb 27 16:29:33 crc kubenswrapper[4751]: E0227 16:29:33.684540 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:29:33Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:29:33Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:29:33Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-27T16:29:33Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:33 crc kubenswrapper[4751]: E0227 16:29:33.685159 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:33 crc kubenswrapper[4751]: E0227 16:29:33.685505 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:33 crc kubenswrapper[4751]: E0227 16:29:33.685947 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:33 crc kubenswrapper[4751]: E0227 16:29:33.686452 4751 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:33 crc kubenswrapper[4751]: E0227 16:29:33.686485 4751 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 27 16:29:34 crc kubenswrapper[4751]: I0227 16:29:34.228574 4751 generic.go:334] "Generic (PLEG): container finished" 
podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="bcec1cfa4049b26aff20c15fccd7ac2d0b3666fc59564b5a0cf6163eeb1650c1" exitCode=0 Feb 27 16:29:34 crc kubenswrapper[4751]: I0227 16:29:34.228656 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"bcec1cfa4049b26aff20c15fccd7ac2d0b3666fc59564b5a0cf6163eeb1650c1"} Feb 27 16:29:34 crc kubenswrapper[4751]: I0227 16:29:34.228686 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"4d03140b7769d9bafc9f235b37695dd68a0db6fdc8478b740077d77159d1108f"} Feb 27 16:29:34 crc kubenswrapper[4751]: I0227 16:29:34.228959 4751 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="66626c61-3f6b-48d2-92e6-a061f0c0a2bb" Feb 27 16:29:34 crc kubenswrapper[4751]: I0227 16:29:34.228972 4751 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="66626c61-3f6b-48d2-92e6-a061f0c0a2bb" Feb 27 16:29:34 crc kubenswrapper[4751]: E0227 16:29:34.229382 4751 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:29:34 crc kubenswrapper[4751]: I0227 16:29:34.229723 4751 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:34 crc kubenswrapper[4751]: I0227 16:29:34.230230 4751 status_manager.go:851] "Failed to get status for pod" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" pod="openshift-marketplace/community-operators-94pcv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-94pcv\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:34 crc kubenswrapper[4751]: I0227 16:29:34.231020 4751 status_manager.go:851] "Failed to get status for pod" podUID="cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" pod="openshift-marketplace/redhat-operators-r48bt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-r48bt\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:34 crc kubenswrapper[4751]: I0227 16:29:34.231576 4751 status_manager.go:851] "Failed to get status for pod" podUID="317aef2b-3749-4a30-afc6-96f40516eae7" pod="openshift-marketplace/redhat-marketplace-n8xtl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8xtl\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:34 crc kubenswrapper[4751]: I0227 16:29:34.232023 4751 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 
16:29:34 crc kubenswrapper[4751]: I0227 16:29:34.232491 4751 status_manager.go:851] "Failed to get status for pod" podUID="fbbfa35f-11f5-4a0e-b65e-6ca317880932" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:34 crc kubenswrapper[4751]: I0227 16:29:34.233135 4751 status_manager.go:851] "Failed to get status for pod" podUID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" pod="openshift-marketplace/redhat-marketplace-k5lsc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-k5lsc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:34 crc kubenswrapper[4751]: I0227 16:29:34.237088 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/cluster-policy-controller/0.log" Feb 27 16:29:34 crc kubenswrapper[4751]: I0227 16:29:34.239115 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Feb 27 16:29:34 crc kubenswrapper[4751]: I0227 16:29:34.239194 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"3440e6a876da699d5916c4eb0c4ae98c802d889d59178bddb12e795025c06923"} Feb 27 16:29:34 crc kubenswrapper[4751]: I0227 16:29:34.240076 4751 status_manager.go:851] "Failed to get status for pod" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" pod="openshift-marketplace/community-operators-94pcv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-94pcv\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:34 crc kubenswrapper[4751]: I0227 16:29:34.240472 4751 status_manager.go:851] "Failed to get status for pod" podUID="cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" pod="openshift-marketplace/redhat-operators-r48bt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-r48bt\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:34 crc kubenswrapper[4751]: I0227 16:29:34.240946 4751 status_manager.go:851] "Failed to get status for pod" podUID="317aef2b-3749-4a30-afc6-96f40516eae7" pod="openshift-marketplace/redhat-marketplace-n8xtl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8xtl\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:34 crc kubenswrapper[4751]: I0227 16:29:34.241351 4751 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:34 crc kubenswrapper[4751]: I0227 16:29:34.241685 4751 status_manager.go:851] "Failed to get status for pod" podUID="fbbfa35f-11f5-4a0e-b65e-6ca317880932" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 
38.102.83.64:6443: connect: connection refused" Feb 27 16:29:34 crc kubenswrapper[4751]: I0227 16:29:34.242135 4751 status_manager.go:851] "Failed to get status for pod" podUID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" pod="openshift-marketplace/redhat-marketplace-k5lsc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-k5lsc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:34 crc kubenswrapper[4751]: I0227 16:29:34.242576 4751 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.64:6443: connect: connection refused" Feb 27 16:29:35 crc kubenswrapper[4751]: I0227 16:29:35.253675 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"1ce4e1f9c0b572458c6f19902b71d47ee9b31ed8ce2ab091b058710212f89d7a"} Feb 27 16:29:35 crc kubenswrapper[4751]: I0227 16:29:35.253756 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"602c8ed282a4ec2d4172f0c6ce51d6e3ede06c38a034f345e767af4d809691ac"} Feb 27 16:29:35 crc kubenswrapper[4751]: I0227 16:29:35.253784 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"409e6406bc8a824071e2c2df1b13c5173d5ff59566f2695c8ec43dba0c7f06b0"} Feb 27 16:29:36 crc kubenswrapper[4751]: I0227 16:29:36.264951 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"e26941bb77381805168d50e0e790fa5bf71169d21fe6d23e4ebd8bb3e999e851"} Feb 27 16:29:36 crc kubenswrapper[4751]: I0227 16:29:36.265386 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"d5b6641da6af7398a7afed746993d1e3ed02abe2a237afdb61a0a91a35897b08"} Feb 27 16:29:36 crc kubenswrapper[4751]: I0227 16:29:36.265456 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:29:36 crc kubenswrapper[4751]: I0227 16:29:36.265486 4751 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="66626c61-3f6b-48d2-92e6-a061f0c0a2bb" Feb 27 16:29:36 crc kubenswrapper[4751]: I0227 16:29:36.265528 4751 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="66626c61-3f6b-48d2-92e6-a061f0c0a2bb" Feb 27 16:29:38 crc kubenswrapper[4751]: I0227 16:29:38.544854 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:29:38 crc kubenswrapper[4751]: I0227 16:29:38.545609 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:29:38 crc kubenswrapper[4751]: I0227 16:29:38.553929 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:29:40 crc kubenswrapper[4751]: I0227 16:29:40.254376 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 27 16:29:40 crc kubenswrapper[4751]: I0227 16:29:40.261846 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 27 16:29:40 crc kubenswrapper[4751]: I0227 16:29:40.292311 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 27 16:29:41 crc kubenswrapper[4751]: I0227 16:29:41.296551 4751 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:29:41 crc kubenswrapper[4751]: I0227 16:29:41.505819 4751 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="a5e5eeed-c9a3-4d76-b989-639346439ac4" Feb 27 16:29:42 crc kubenswrapper[4751]: I0227 16:29:42.303489 4751 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="66626c61-3f6b-48d2-92e6-a061f0c0a2bb" Feb 27 16:29:42 crc kubenswrapper[4751]: I0227 16:29:42.303525 4751 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="66626c61-3f6b-48d2-92e6-a061f0c0a2bb" Feb 27 16:29:42 crc kubenswrapper[4751]: I0227 16:29:42.306215 4751 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="a5e5eeed-c9a3-4d76-b989-639346439ac4" Feb 27 16:29:50 crc kubenswrapper[4751]: I0227 16:29:50.780143 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Feb 27 16:29:50 crc kubenswrapper[4751]: I0227 16:29:50.886915 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Feb 27 16:29:51 crc kubenswrapper[4751]: I0227 16:29:51.110119 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 27 16:29:51 crc kubenswrapper[4751]: I0227 16:29:51.449165 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Feb 27 16:29:51 crc kubenswrapper[4751]: I0227 16:29:51.483533 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Feb 27 16:29:51 crc kubenswrapper[4751]: I0227 16:29:51.964355 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Feb 27 16:29:52 crc kubenswrapper[4751]: I0227 16:29:52.467241 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Feb 27 16:29:52 crc kubenswrapper[4751]: I0227 16:29:52.623933 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Feb 27 16:29:52 crc kubenswrapper[4751]: I0227 16:29:52.799056 4751 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-cluster-machine-approver"/"machine-approver-config" Feb 27 16:29:53 crc kubenswrapper[4751]: I0227 16:29:53.385234 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Feb 27 16:29:53 crc kubenswrapper[4751]: I0227 16:29:53.403876 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Feb 27 16:29:53 crc kubenswrapper[4751]: I0227 16:29:53.441145 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Feb 27 16:29:53 crc kubenswrapper[4751]: I0227 16:29:53.464131 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Feb 27 16:29:53 crc kubenswrapper[4751]: I0227 16:29:53.472546 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Feb 27 16:29:53 crc kubenswrapper[4751]: I0227 16:29:53.556976 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Feb 27 16:29:53 crc kubenswrapper[4751]: I0227 16:29:53.688557 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Feb 27 16:29:53 crc kubenswrapper[4751]: I0227 16:29:53.728187 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Feb 27 16:29:53 crc kubenswrapper[4751]: I0227 16:29:53.735229 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Feb 27 16:29:53 crc kubenswrapper[4751]: I0227 16:29:53.752390 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Feb 27 16:29:53 crc kubenswrapper[4751]: I0227 16:29:53.796736 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Feb 27 16:29:53 crc kubenswrapper[4751]: I0227 16:29:53.864164 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Feb 27 16:29:53 crc kubenswrapper[4751]: I0227 16:29:53.944611 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Feb 27 16:29:53 crc kubenswrapper[4751]: I0227 16:29:53.971972 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Feb 27 16:29:54 crc kubenswrapper[4751]: I0227 16:29:54.087339 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Feb 27 16:29:54 crc kubenswrapper[4751]: I0227 16:29:54.191657 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Feb 27 16:29:54 crc kubenswrapper[4751]: I0227 16:29:54.314502 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Feb 27 16:29:54 crc kubenswrapper[4751]: I0227 16:29:54.376350 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Feb 27 16:29:54 crc 
kubenswrapper[4751]: I0227 16:29:54.385513 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Feb 27 16:29:54 crc kubenswrapper[4751]: I0227 16:29:54.473151 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Feb 27 16:29:54 crc kubenswrapper[4751]: I0227 16:29:54.681243 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Feb 27 16:29:54 crc kubenswrapper[4751]: I0227 16:29:54.703789 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Feb 27 16:29:54 crc kubenswrapper[4751]: I0227 16:29:54.869101 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Feb 27 16:29:54 crc kubenswrapper[4751]: I0227 16:29:54.875588 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Feb 27 16:29:54 crc kubenswrapper[4751]: I0227 16:29:54.928756 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Feb 27 16:29:54 crc kubenswrapper[4751]: I0227 16:29:54.935943 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Feb 27 16:29:54 crc kubenswrapper[4751]: I0227 16:29:54.978948 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Feb 27 16:29:55 crc kubenswrapper[4751]: I0227 16:29:55.028436 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Feb 27 16:29:55 crc kubenswrapper[4751]: I0227 16:29:55.108712 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Feb 27 16:29:55 crc kubenswrapper[4751]: I0227 16:29:55.185631 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Feb 27 16:29:55 crc kubenswrapper[4751]: I0227 16:29:55.284358 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Feb 27 16:29:55 crc kubenswrapper[4751]: I0227 16:29:55.563957 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Feb 27 16:29:55 crc kubenswrapper[4751]: I0227 16:29:55.604544 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Feb 27 16:29:55 crc kubenswrapper[4751]: I0227 16:29:55.709350 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Feb 27 16:29:55 crc kubenswrapper[4751]: I0227 16:29:55.713720 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Feb 27 16:29:55 crc kubenswrapper[4751]: I0227 16:29:55.716226 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Feb 27 16:29:55 crc kubenswrapper[4751]: I0227 16:29:55.723776 4751 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-console-operator"/"openshift-service-ca.crt" Feb 27 16:29:55 crc kubenswrapper[4751]: I0227 16:29:55.745365 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Feb 27 16:29:55 crc kubenswrapper[4751]: I0227 16:29:55.786763 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Feb 27 16:29:55 crc kubenswrapper[4751]: I0227 16:29:55.797894 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Feb 27 16:29:55 crc kubenswrapper[4751]: I0227 16:29:55.812884 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Feb 27 16:29:55 crc kubenswrapper[4751]: I0227 16:29:55.828373 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Feb 27 16:29:55 crc kubenswrapper[4751]: I0227 16:29:55.851089 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Feb 27 16:29:55 crc kubenswrapper[4751]: I0227 16:29:55.896821 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Feb 27 16:29:55 crc kubenswrapper[4751]: I0227 16:29:55.898787 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Feb 27 16:29:55 crc kubenswrapper[4751]: I0227 16:29:55.991146 4751 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Feb 27 16:29:56 crc kubenswrapper[4751]: I0227 16:29:56.030995 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Feb 27 16:29:56 crc kubenswrapper[4751]: I0227 16:29:56.042702 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Feb 27 16:29:56 crc kubenswrapper[4751]: I0227 16:29:56.183691 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Feb 27 16:29:56 crc kubenswrapper[4751]: I0227 16:29:56.206157 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Feb 27 16:29:56 crc kubenswrapper[4751]: I0227 16:29:56.212067 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Feb 27 16:29:56 crc kubenswrapper[4751]: I0227 16:29:56.411235 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Feb 27 16:29:56 crc kubenswrapper[4751]: I0227 16:29:56.456104 4751 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Feb 27 16:29:56 crc kubenswrapper[4751]: I0227 16:29:56.470261 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Feb 27 16:29:56 crc kubenswrapper[4751]: I0227 16:29:56.470261 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Feb 27 16:29:56 crc kubenswrapper[4751]: I0227 16:29:56.486482 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Feb 27 16:29:56 crc 
kubenswrapper[4751]: I0227 16:29:56.661130 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Feb 27 16:29:56 crc kubenswrapper[4751]: I0227 16:29:56.707998 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Feb 27 16:29:56 crc kubenswrapper[4751]: I0227 16:29:56.718690 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Feb 27 16:29:56 crc kubenswrapper[4751]: I0227 16:29:56.788060 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Feb 27 16:29:56 crc kubenswrapper[4751]: I0227 16:29:56.818606 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Feb 27 16:29:56 crc kubenswrapper[4751]: I0227 16:29:56.893361 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.027113 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.027823 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.030215 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.066986 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.106889 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.199384 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.206788 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.251813 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.284053 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.370908 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.386600 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.473193 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.719538 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.738771 4751 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.756824 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.777130 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.813809 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.818960 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.841669 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.845907 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.921952 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.927554 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.941234 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Feb 27 16:29:57 crc kubenswrapper[4751]: I0227 16:29:57.996057 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Feb 27 16:29:58 crc kubenswrapper[4751]: I0227 16:29:58.054989 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Feb 27 16:29:58 crc kubenswrapper[4751]: I0227 16:29:58.112244 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Feb 27 16:29:58 crc kubenswrapper[4751]: I0227 16:29:58.212945 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Feb 27 16:29:58 crc kubenswrapper[4751]: I0227 16:29:58.385246 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Feb 27 16:29:58 crc kubenswrapper[4751]: I0227 16:29:58.495453 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Feb 27 16:29:58 crc kubenswrapper[4751]: I0227 16:29:58.499629 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Feb 27 16:29:58 crc kubenswrapper[4751]: I0227 16:29:58.563202 4751 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Feb 27 16:29:58 crc kubenswrapper[4751]: I0227 16:29:58.657311 4751 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-machine-config-operator"/"kube-root-ca.crt" Feb 27 16:29:58 crc kubenswrapper[4751]: I0227 16:29:58.661418 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Feb 27 16:29:58 crc kubenswrapper[4751]: I0227 16:29:58.736313 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Feb 27 16:29:58 crc kubenswrapper[4751]: I0227 16:29:58.802393 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Feb 27 16:29:58 crc kubenswrapper[4751]: I0227 16:29:58.898519 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Feb 27 16:29:59 crc kubenswrapper[4751]: I0227 16:29:59.078752 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Feb 27 16:29:59 crc kubenswrapper[4751]: I0227 16:29:59.123942 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Feb 27 16:29:59 crc kubenswrapper[4751]: I0227 16:29:59.129146 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Feb 27 16:29:59 crc kubenswrapper[4751]: I0227 16:29:59.155332 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Feb 27 16:29:59 crc kubenswrapper[4751]: I0227 16:29:59.283335 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Feb 27 16:29:59 crc kubenswrapper[4751]: I0227 16:29:59.290458 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Feb 27 16:29:59 crc kubenswrapper[4751]: I0227 16:29:59.331985 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Feb 27 16:29:59 crc kubenswrapper[4751]: I0227 16:29:59.340900 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Feb 27 16:29:59 crc kubenswrapper[4751]: I0227 16:29:59.381533 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Feb 27 16:29:59 crc kubenswrapper[4751]: I0227 16:29:59.388056 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Feb 27 16:29:59 crc kubenswrapper[4751]: I0227 16:29:59.525111 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Feb 27 16:29:59 crc kubenswrapper[4751]: I0227 16:29:59.531319 4751 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Feb 27 16:29:59 crc kubenswrapper[4751]: I0227 16:29:59.547947 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Feb 27 16:29:59 crc kubenswrapper[4751]: I0227 16:29:59.584880 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Feb 27 16:29:59 crc kubenswrapper[4751]: I0227 16:29:59.617089 4751 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-authentication-operator"/"kube-root-ca.crt" Feb 27 16:29:59 crc kubenswrapper[4751]: I0227 16:29:59.630581 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Feb 27 16:29:59 crc kubenswrapper[4751]: I0227 16:29:59.689519 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Feb 27 16:29:59 crc kubenswrapper[4751]: I0227 16:29:59.782208 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Feb 27 16:29:59 crc kubenswrapper[4751]: I0227 16:29:59.826461 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Feb 27 16:29:59 crc kubenswrapper[4751]: I0227 16:29:59.995606 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.037073 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.069098 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.099781 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.195170 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.271683 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.272537 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.338021 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.340118 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.405100 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.411886 4751 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.413955 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=42.413935295 podStartE2EDuration="42.413935295s" podCreationTimestamp="2026-02-27 16:29:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:29:41.413094418 +0000 UTC m=+343.560108885" watchObservedRunningTime="2026-02-27 16:30:00.413935295 +0000 UTC m=+362.560949752" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.416380 4751 kubelet.go:2431] "SyncLoop 
REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.416464 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-78446ffb65-x55kb","openshift-kube-apiserver/kube-apiserver-crc"] Feb 27 16:30:00 crc kubenswrapper[4751]: E0227 16:30:00.416671 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fbbfa35f-11f5-4a0e-b65e-6ca317880932" containerName="installer" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.416690 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="fbbfa35f-11f5-4a0e-b65e-6ca317880932" containerName="installer" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.416785 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="fbbfa35f-11f5-4a0e-b65e-6ca317880932" containerName="installer" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.416959 4751 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="66626c61-3f6b-48d2-92e6-a061f0c0a2bb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.416994 4751 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="66626c61-3f6b-48d2-92e6-a061f0c0a2bb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.417261 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.419002 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.419828 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.420360 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.420742 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.420774 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.421226 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.421552 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.421955 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.422064 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.422274 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.422498 4751 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-authentication"/"kube-root-ca.crt" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.422631 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.422673 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.422783 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.428723 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.435344 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.443439 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=19.443393635 podStartE2EDuration="19.443393635s" podCreationTimestamp="2026-02-27 16:29:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:30:00.438557833 +0000 UTC m=+362.585572300" watchObservedRunningTime="2026-02-27 16:30:00.443393635 +0000 UTC m=+362.590408082" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.443600 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.522088 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.533100 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.546573 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-audit-dir\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.546627 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-user-template-error\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.547048 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc 
kubenswrapper[4751]: I0227 16:30:00.547100 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-system-router-certs\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.547280 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.547329 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-user-template-login\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.547393 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.547433 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-system-serving-cert\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.547455 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.547524 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-audit-policies\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.547555 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-system-cliconfig\") pod 
\"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.547605 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ffb28\" (UniqueName: \"kubernetes.io/projected/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-kube-api-access-ffb28\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.547642 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-system-service-ca\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.547693 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-system-session\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.561733 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.565855 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.635061 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.648317 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-system-service-ca\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.648387 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-system-session\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.648518 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-audit-dir\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.648544 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: 
\"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-user-template-error\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.648581 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.648626 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.648659 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-system-router-certs\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.648697 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.648720 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-user-template-login\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.648744 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.649992 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-system-serving-cert\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.650023 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " 
pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.648740 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-audit-dir\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.649567 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-system-service-ca\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.650351 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-audit-policies\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.650484 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-system-cliconfig\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.650546 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ffb28\" (UniqueName: \"kubernetes.io/projected/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-kube-api-access-ffb28\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.651323 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.652001 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-system-cliconfig\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.652351 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-audit-policies\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.656797 4751 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.656901 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-system-serving-cert\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.656965 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-user-template-login\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.657590 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-user-template-error\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.657828 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-system-session\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.658316 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.659362 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-system-router-certs\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.661934 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.671729 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-ffb28\" (UniqueName: \"kubernetes.io/projected/a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912-kube-api-access-ffb28\") pod \"oauth-openshift-78446ffb65-x55kb\" (UID: \"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912\") " pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.693025 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.702753 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.738937 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.832495 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.862566 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.909618 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.963611 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Feb 27 16:30:00 crc kubenswrapper[4751]: I0227 16:30:00.983898 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.011549 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.033118 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.176885 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-78446ffb65-x55kb"] Feb 27 16:30:01 crc kubenswrapper[4751]: W0227 16:30:01.180586 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda4f1fcc2_5285_4c5e_b867_1c5b6e4ce912.slice/crio-170cfc9c8dcd886be0e8fbba54ce6bb6ac56f569ceff0f5d18131b92c124ef4d WatchSource:0}: Error finding container 170cfc9c8dcd886be0e8fbba54ce6bb6ac56f569ceff0f5d18131b92c124ef4d: Status 404 returned error can't find the container with id 170cfc9c8dcd886be0e8fbba54ce6bb6ac56f569ceff0f5d18131b92c124ef4d Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.206299 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.312796 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.316689 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536830-pggzf"] Feb 27 16:30:01 crc 
kubenswrapper[4751]: I0227 16:30:01.317305 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536830-pggzf" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.318788 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.319936 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.320562 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536830-lp72m"] Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.321103 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536830-lp72m" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.323429 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.323778 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.324040 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.347690 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536830-lp72m"] Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.357533 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/193bf22c-1f33-4d04-8688-f67aef4fc77e-secret-volume\") pod \"collect-profiles-29536830-pggzf\" (UID: \"193bf22c-1f33-4d04-8688-f67aef4fc77e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536830-pggzf" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.357578 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9bf68\" (UniqueName: \"kubernetes.io/projected/00cac46b-1c35-43d3-82ba-777e5ebb11c4-kube-api-access-9bf68\") pod \"auto-csr-approver-29536830-lp72m\" (UID: \"00cac46b-1c35-43d3-82ba-777e5ebb11c4\") " pod="openshift-infra/auto-csr-approver-29536830-lp72m" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.357614 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8w7r6\" (UniqueName: \"kubernetes.io/projected/193bf22c-1f33-4d04-8688-f67aef4fc77e-kube-api-access-8w7r6\") pod \"collect-profiles-29536830-pggzf\" (UID: \"193bf22c-1f33-4d04-8688-f67aef4fc77e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536830-pggzf" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.357644 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/193bf22c-1f33-4d04-8688-f67aef4fc77e-config-volume\") pod \"collect-profiles-29536830-pggzf\" (UID: \"193bf22c-1f33-4d04-8688-f67aef4fc77e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536830-pggzf" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.370946 4751 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-ingress-canary"/"canary-serving-cert" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.377271 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536830-pggzf"] Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.422167 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" event={"ID":"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912","Type":"ContainerStarted","Data":"170cfc9c8dcd886be0e8fbba54ce6bb6ac56f569ceff0f5d18131b92c124ef4d"} Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.458321 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8w7r6\" (UniqueName: \"kubernetes.io/projected/193bf22c-1f33-4d04-8688-f67aef4fc77e-kube-api-access-8w7r6\") pod \"collect-profiles-29536830-pggzf\" (UID: \"193bf22c-1f33-4d04-8688-f67aef4fc77e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536830-pggzf" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.458376 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/193bf22c-1f33-4d04-8688-f67aef4fc77e-config-volume\") pod \"collect-profiles-29536830-pggzf\" (UID: \"193bf22c-1f33-4d04-8688-f67aef4fc77e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536830-pggzf" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.458435 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/193bf22c-1f33-4d04-8688-f67aef4fc77e-secret-volume\") pod \"collect-profiles-29536830-pggzf\" (UID: \"193bf22c-1f33-4d04-8688-f67aef4fc77e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536830-pggzf" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.458454 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9bf68\" (UniqueName: \"kubernetes.io/projected/00cac46b-1c35-43d3-82ba-777e5ebb11c4-kube-api-access-9bf68\") pod \"auto-csr-approver-29536830-lp72m\" (UID: \"00cac46b-1c35-43d3-82ba-777e5ebb11c4\") " pod="openshift-infra/auto-csr-approver-29536830-lp72m" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.461208 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/193bf22c-1f33-4d04-8688-f67aef4fc77e-config-volume\") pod \"collect-profiles-29536830-pggzf\" (UID: \"193bf22c-1f33-4d04-8688-f67aef4fc77e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536830-pggzf" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.465907 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/193bf22c-1f33-4d04-8688-f67aef4fc77e-secret-volume\") pod \"collect-profiles-29536830-pggzf\" (UID: \"193bf22c-1f33-4d04-8688-f67aef4fc77e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536830-pggzf" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.475108 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9bf68\" (UniqueName: \"kubernetes.io/projected/00cac46b-1c35-43d3-82ba-777e5ebb11c4-kube-api-access-9bf68\") pod \"auto-csr-approver-29536830-lp72m\" (UID: \"00cac46b-1c35-43d3-82ba-777e5ebb11c4\") " pod="openshift-infra/auto-csr-approver-29536830-lp72m" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 
16:30:01.480018 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8w7r6\" (UniqueName: \"kubernetes.io/projected/193bf22c-1f33-4d04-8688-f67aef4fc77e-kube-api-access-8w7r6\") pod \"collect-profiles-29536830-pggzf\" (UID: \"193bf22c-1f33-4d04-8688-f67aef4fc77e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536830-pggzf" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.611119 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.618257 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.636366 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.639969 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536830-pggzf" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.655452 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536830-lp72m" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.680293 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.819263 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Feb 27 16:30:01 crc kubenswrapper[4751]: I0227 16:30:01.927565 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536830-lp72m"] Feb 27 16:30:01 crc kubenswrapper[4751]: W0227 16:30:01.934866 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod00cac46b_1c35_43d3_82ba_777e5ebb11c4.slice/crio-21d23dfbf9fd1f2fede3f844b8f48a970427d376bece95a77a23d7cf6ddb327b WatchSource:0}: Error finding container 21d23dfbf9fd1f2fede3f844b8f48a970427d376bece95a77a23d7cf6ddb327b: Status 404 returned error can't find the container with id 21d23dfbf9fd1f2fede3f844b8f48a970427d376bece95a77a23d7cf6ddb327b Feb 27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.036150 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Feb 27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.067621 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536830-pggzf"] Feb 27 16:30:02 crc kubenswrapper[4751]: W0227 16:30:02.076005 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod193bf22c_1f33_4d04_8688_f67aef4fc77e.slice/crio-9f20cef81acb3a1e4f0d9dbecf919367dfb7a11c23b7edb8020c94c8933d924a WatchSource:0}: Error finding container 9f20cef81acb3a1e4f0d9dbecf919367dfb7a11c23b7edb8020c94c8933d924a: Status 404 returned error can't find the container with id 9f20cef81acb3a1e4f0d9dbecf919367dfb7a11c23b7edb8020c94c8933d924a Feb 27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.081378 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Feb 
27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.091768 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Feb 27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.146291 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Feb 27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.227643 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Feb 27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.434763 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" event={"ID":"a4f1fcc2-5285-4c5e-b867-1c5b6e4ce912","Type":"ContainerStarted","Data":"0509b852e3440be418ceba95ce69075838ebf5e65b7353a2dea09e794bc04e34"} Feb 27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.436211 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.438023 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536830-lp72m" event={"ID":"00cac46b-1c35-43d3-82ba-777e5ebb11c4","Type":"ContainerStarted","Data":"21d23dfbf9fd1f2fede3f844b8f48a970427d376bece95a77a23d7cf6ddb327b"} Feb 27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.441081 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536830-pggzf" event={"ID":"193bf22c-1f33-4d04-8688-f67aef4fc77e","Type":"ContainerStarted","Data":"3e87f972a61a3a942f6e35945ca33d4311334bedb4aa43d97c8816350a0556d8"} Feb 27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.441141 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536830-pggzf" event={"ID":"193bf22c-1f33-4d04-8688-f67aef4fc77e","Type":"ContainerStarted","Data":"9f20cef81acb3a1e4f0d9dbecf919367dfb7a11c23b7edb8020c94c8933d924a"} Feb 27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.458250 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" podStartSLOduration=80.458235945 podStartE2EDuration="1m20.458235945s" podCreationTimestamp="2026-02-27 16:28:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:30:02.457415722 +0000 UTC m=+364.604430169" watchObservedRunningTime="2026-02-27 16:30:02.458235945 +0000 UTC m=+364.605250382" Feb 27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.467857 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Feb 27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.469249 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Feb 27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.475497 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29536830-pggzf" podStartSLOduration=1.475477373 podStartE2EDuration="1.475477373s" podCreationTimestamp="2026-02-27 16:30:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:30:02.474337002 +0000 UTC 
m=+364.621351469" watchObservedRunningTime="2026-02-27 16:30:02.475477373 +0000 UTC m=+364.622491830" Feb 27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.533079 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Feb 27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.714118 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-78446ffb65-x55kb" Feb 27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.734067 4751 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Feb 27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.734281 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://207b107bb5115b9bfd5a6aea995cf996103c7b0ea08bb10ceb6357d38db54faa" gracePeriod=5 Feb 27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.831114 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Feb 27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.874419 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Feb 27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.941367 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Feb 27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.953613 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Feb 27 16:30:02 crc kubenswrapper[4751]: I0227 16:30:02.978275 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Feb 27 16:30:03 crc kubenswrapper[4751]: I0227 16:30:03.100858 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Feb 27 16:30:03 crc kubenswrapper[4751]: I0227 16:30:03.132154 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Feb 27 16:30:03 crc kubenswrapper[4751]: I0227 16:30:03.133210 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Feb 27 16:30:03 crc kubenswrapper[4751]: I0227 16:30:03.161085 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Feb 27 16:30:03 crc kubenswrapper[4751]: I0227 16:30:03.190081 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Feb 27 16:30:03 crc kubenswrapper[4751]: I0227 16:30:03.415316 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Feb 27 16:30:03 crc kubenswrapper[4751]: I0227 16:30:03.460830 4751 generic.go:334] "Generic (PLEG): container finished" podID="193bf22c-1f33-4d04-8688-f67aef4fc77e" containerID="3e87f972a61a3a942f6e35945ca33d4311334bedb4aa43d97c8816350a0556d8" exitCode=0 Feb 27 16:30:03 crc kubenswrapper[4751]: I0227 16:30:03.461530 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536830-pggzf" 
event={"ID":"193bf22c-1f33-4d04-8688-f67aef4fc77e","Type":"ContainerDied","Data":"3e87f972a61a3a942f6e35945ca33d4311334bedb4aa43d97c8816350a0556d8"} Feb 27 16:30:03 crc kubenswrapper[4751]: I0227 16:30:03.507791 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Feb 27 16:30:03 crc kubenswrapper[4751]: I0227 16:30:03.708983 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Feb 27 16:30:03 crc kubenswrapper[4751]: I0227 16:30:03.747479 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Feb 27 16:30:03 crc kubenswrapper[4751]: I0227 16:30:03.748634 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Feb 27 16:30:03 crc kubenswrapper[4751]: I0227 16:30:03.788777 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Feb 27 16:30:03 crc kubenswrapper[4751]: I0227 16:30:03.892800 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Feb 27 16:30:03 crc kubenswrapper[4751]: I0227 16:30:03.925831 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.029565 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.042791 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.064640 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.069089 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.102022 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.113422 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.200275 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.223130 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.271348 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.337099 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.398705 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 
16:30:04.548984 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.549612 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.601385 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.622900 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.624421 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.735459 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536830-pggzf" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.892785 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.904992 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8w7r6\" (UniqueName: \"kubernetes.io/projected/193bf22c-1f33-4d04-8688-f67aef4fc77e-kube-api-access-8w7r6\") pod \"193bf22c-1f33-4d04-8688-f67aef4fc77e\" (UID: \"193bf22c-1f33-4d04-8688-f67aef4fc77e\") " Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.905204 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/193bf22c-1f33-4d04-8688-f67aef4fc77e-secret-volume\") pod \"193bf22c-1f33-4d04-8688-f67aef4fc77e\" (UID: \"193bf22c-1f33-4d04-8688-f67aef4fc77e\") " Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.905272 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/193bf22c-1f33-4d04-8688-f67aef4fc77e-config-volume\") pod \"193bf22c-1f33-4d04-8688-f67aef4fc77e\" (UID: \"193bf22c-1f33-4d04-8688-f67aef4fc77e\") " Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.906928 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/193bf22c-1f33-4d04-8688-f67aef4fc77e-config-volume" (OuterVolumeSpecName: "config-volume") pod "193bf22c-1f33-4d04-8688-f67aef4fc77e" (UID: "193bf22c-1f33-4d04-8688-f67aef4fc77e"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.907043 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.915022 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/193bf22c-1f33-4d04-8688-f67aef4fc77e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "193bf22c-1f33-4d04-8688-f67aef4fc77e" (UID: "193bf22c-1f33-4d04-8688-f67aef4fc77e"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.924462 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/193bf22c-1f33-4d04-8688-f67aef4fc77e-kube-api-access-8w7r6" (OuterVolumeSpecName: "kube-api-access-8w7r6") pod "193bf22c-1f33-4d04-8688-f67aef4fc77e" (UID: "193bf22c-1f33-4d04-8688-f67aef4fc77e"). InnerVolumeSpecName "kube-api-access-8w7r6". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.927698 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.958260 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Feb 27 16:30:04 crc kubenswrapper[4751]: I0227 16:30:04.967326 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Feb 27 16:30:05 crc kubenswrapper[4751]: I0227 16:30:05.006536 4751 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/193bf22c-1f33-4d04-8688-f67aef4fc77e-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:05 crc kubenswrapper[4751]: I0227 16:30:05.006564 4751 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/193bf22c-1f33-4d04-8688-f67aef4fc77e-config-volume\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:05 crc kubenswrapper[4751]: I0227 16:30:05.006574 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8w7r6\" (UniqueName: \"kubernetes.io/projected/193bf22c-1f33-4d04-8688-f67aef4fc77e-kube-api-access-8w7r6\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:05 crc kubenswrapper[4751]: I0227 16:30:05.201682 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Feb 27 16:30:05 crc kubenswrapper[4751]: I0227 16:30:05.387596 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Feb 27 16:30:05 crc kubenswrapper[4751]: I0227 16:30:05.479632 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Feb 27 16:30:05 crc kubenswrapper[4751]: I0227 16:30:05.480987 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536830-pggzf" event={"ID":"193bf22c-1f33-4d04-8688-f67aef4fc77e","Type":"ContainerDied","Data":"9f20cef81acb3a1e4f0d9dbecf919367dfb7a11c23b7edb8020c94c8933d924a"} Feb 27 16:30:05 crc kubenswrapper[4751]: I0227 16:30:05.481064 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9f20cef81acb3a1e4f0d9dbecf919367dfb7a11c23b7edb8020c94c8933d924a" Feb 27 16:30:05 crc kubenswrapper[4751]: I0227 16:30:05.481171 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536830-pggzf" Feb 27 16:30:05 crc kubenswrapper[4751]: I0227 16:30:05.588075 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Feb 27 16:30:05 crc kubenswrapper[4751]: I0227 16:30:05.604971 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Feb 27 16:30:05 crc kubenswrapper[4751]: I0227 16:30:05.628877 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Feb 27 16:30:05 crc kubenswrapper[4751]: I0227 16:30:05.658378 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Feb 27 16:30:06 crc kubenswrapper[4751]: I0227 16:30:06.085286 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Feb 27 16:30:06 crc kubenswrapper[4751]: I0227 16:30:06.150817 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Feb 27 16:30:06 crc kubenswrapper[4751]: I0227 16:30:06.260762 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Feb 27 16:30:06 crc kubenswrapper[4751]: I0227 16:30:06.435582 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Feb 27 16:30:06 crc kubenswrapper[4751]: I0227 16:30:06.490969 4751 generic.go:334] "Generic (PLEG): container finished" podID="00cac46b-1c35-43d3-82ba-777e5ebb11c4" containerID="06d49298e2ca94c43982065aaa20ab384a489973a7724f8b8a5cc8ecb6cd953d" exitCode=0 Feb 27 16:30:06 crc kubenswrapper[4751]: I0227 16:30:06.491031 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536830-lp72m" event={"ID":"00cac46b-1c35-43d3-82ba-777e5ebb11c4","Type":"ContainerDied","Data":"06d49298e2ca94c43982065aaa20ab384a489973a7724f8b8a5cc8ecb6cd953d"} Feb 27 16:30:06 crc kubenswrapper[4751]: I0227 16:30:06.638981 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Feb 27 16:30:06 crc kubenswrapper[4751]: I0227 16:30:06.860379 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Feb 27 16:30:07 crc kubenswrapper[4751]: I0227 16:30:07.132862 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Feb 27 16:30:07 crc kubenswrapper[4751]: I0227 16:30:07.296312 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Feb 27 16:30:07 crc kubenswrapper[4751]: I0227 16:30:07.455750 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Feb 27 16:30:07 crc kubenswrapper[4751]: I0227 16:30:07.521680 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Feb 27 16:30:07 crc kubenswrapper[4751]: I0227 16:30:07.898044 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536830-lp72m" Feb 27 16:30:07 crc kubenswrapper[4751]: I0227 16:30:07.903812 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Feb 27 16:30:07 crc kubenswrapper[4751]: I0227 16:30:07.903893 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 27 16:30:07 crc kubenswrapper[4751]: I0227 16:30:07.962289 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.064395 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.064533 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.064570 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.064610 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.064591 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.064634 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9bf68\" (UniqueName: \"kubernetes.io/projected/00cac46b-1c35-43d3-82ba-777e5ebb11c4-kube-api-access-9bf68\") pod \"00cac46b-1c35-43d3-82ba-777e5ebb11c4\" (UID: \"00cac46b-1c35-43d3-82ba-777e5ebb11c4\") " Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.064750 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.064789 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.064860 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.065202 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.065514 4751 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.065607 4751 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.065671 4751 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.065729 4751 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.072731 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00cac46b-1c35-43d3-82ba-777e5ebb11c4-kube-api-access-9bf68" (OuterVolumeSpecName: "kube-api-access-9bf68") pod "00cac46b-1c35-43d3-82ba-777e5ebb11c4" (UID: "00cac46b-1c35-43d3-82ba-777e5ebb11c4"). InnerVolumeSpecName "kube-api-access-9bf68". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.076097 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.167008 4751 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.167048 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9bf68\" (UniqueName: \"kubernetes.io/projected/00cac46b-1c35-43d3-82ba-777e5ebb11c4-kube-api-access-9bf68\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.504036 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536830-lp72m" event={"ID":"00cac46b-1c35-43d3-82ba-777e5ebb11c4","Type":"ContainerDied","Data":"21d23dfbf9fd1f2fede3f844b8f48a970427d376bece95a77a23d7cf6ddb327b"} Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.504105 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="21d23dfbf9fd1f2fede3f844b8f48a970427d376bece95a77a23d7cf6ddb327b" Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.504772 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536830-lp72m" Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.505940 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.506030 4751 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="207b107bb5115b9bfd5a6aea995cf996103c7b0ea08bb10ceb6357d38db54faa" exitCode=137 Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.506094 4751 scope.go:117] "RemoveContainer" containerID="207b107bb5115b9bfd5a6aea995cf996103c7b0ea08bb10ceb6357d38db54faa" Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.506131 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.526667 4751 scope.go:117] "RemoveContainer" containerID="207b107bb5115b9bfd5a6aea995cf996103c7b0ea08bb10ceb6357d38db54faa" Feb 27 16:30:08 crc kubenswrapper[4751]: E0227 16:30:08.526981 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"207b107bb5115b9bfd5a6aea995cf996103c7b0ea08bb10ceb6357d38db54faa\": container with ID starting with 207b107bb5115b9bfd5a6aea995cf996103c7b0ea08bb10ceb6357d38db54faa not found: ID does not exist" containerID="207b107bb5115b9bfd5a6aea995cf996103c7b0ea08bb10ceb6357d38db54faa" Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.527026 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"207b107bb5115b9bfd5a6aea995cf996103c7b0ea08bb10ceb6357d38db54faa"} err="failed to get container status \"207b107bb5115b9bfd5a6aea995cf996103c7b0ea08bb10ceb6357d38db54faa\": rpc error: code = NotFound desc = could not find container \"207b107bb5115b9bfd5a6aea995cf996103c7b0ea08bb10ceb6357d38db54faa\": container with ID starting with 207b107bb5115b9bfd5a6aea995cf996103c7b0ea08bb10ceb6357d38db54faa not found: ID does not exist" Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.528932 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.529207 4751 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.542285 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.542346 4751 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="e60f37fa-0436-4793-850e-c7152db20990" Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.548802 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Feb 27 16:30:08 crc kubenswrapper[4751]: I0227 16:30:08.548862 4751 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="e60f37fa-0436-4793-850e-c7152db20990" Feb 27 16:30:14 crc kubenswrapper[4751]: I0227 16:30:14.838839 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Feb 27 16:30:14 crc kubenswrapper[4751]: I0227 16:30:14.864688 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Feb 27 16:30:16 crc kubenswrapper[4751]: I0227 16:30:16.619969 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Feb 27 16:30:19 crc kubenswrapper[4751]: I0227 16:30:19.516700 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck"] Feb 27 16:30:19 crc kubenswrapper[4751]: I0227 16:30:19.517238 4751 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" podUID="aaacc455-81c3-4f96-91ef-2b45ff0017d2" containerName="controller-manager" containerID="cri-o://0bcffef6b7f20c61f0e8878834860c9563a42870bf01e74e2d45ddb47c4f39d3" gracePeriod=30 Feb 27 16:30:19 crc kubenswrapper[4751]: I0227 16:30:19.606182 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb"] Feb 27 16:30:19 crc kubenswrapper[4751]: I0227 16:30:19.606679 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" podUID="b48f83f9-1144-4354-9150-e425bda781bc" containerName="route-controller-manager" containerID="cri-o://724468fedc84bce446b4f7075ccbe9a7751b7c3fcf0020eafb72eda9a3ecc0e9" gracePeriod=30 Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.086850 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.251443 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aaacc455-81c3-4f96-91ef-2b45ff0017d2-config\") pod \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\" (UID: \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\") " Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.251538 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aaacc455-81c3-4f96-91ef-2b45ff0017d2-client-ca\") pod \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\" (UID: \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\") " Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.251636 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-trtdq\" (UniqueName: \"kubernetes.io/projected/aaacc455-81c3-4f96-91ef-2b45ff0017d2-kube-api-access-trtdq\") pod \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\" (UID: \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\") " Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.251677 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/aaacc455-81c3-4f96-91ef-2b45ff0017d2-proxy-ca-bundles\") pod \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\" (UID: \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\") " Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.251793 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aaacc455-81c3-4f96-91ef-2b45ff0017d2-serving-cert\") pod \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\" (UID: \"aaacc455-81c3-4f96-91ef-2b45ff0017d2\") " Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.252718 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aaacc455-81c3-4f96-91ef-2b45ff0017d2-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "aaacc455-81c3-4f96-91ef-2b45ff0017d2" (UID: "aaacc455-81c3-4f96-91ef-2b45ff0017d2"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.252772 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aaacc455-81c3-4f96-91ef-2b45ff0017d2-client-ca" (OuterVolumeSpecName: "client-ca") pod "aaacc455-81c3-4f96-91ef-2b45ff0017d2" (UID: "aaacc455-81c3-4f96-91ef-2b45ff0017d2"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.252794 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aaacc455-81c3-4f96-91ef-2b45ff0017d2-config" (OuterVolumeSpecName: "config") pod "aaacc455-81c3-4f96-91ef-2b45ff0017d2" (UID: "aaacc455-81c3-4f96-91ef-2b45ff0017d2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.258394 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aaacc455-81c3-4f96-91ef-2b45ff0017d2-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "aaacc455-81c3-4f96-91ef-2b45ff0017d2" (UID: "aaacc455-81c3-4f96-91ef-2b45ff0017d2"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.260148 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aaacc455-81c3-4f96-91ef-2b45ff0017d2-kube-api-access-trtdq" (OuterVolumeSpecName: "kube-api-access-trtdq") pod "aaacc455-81c3-4f96-91ef-2b45ff0017d2" (UID: "aaacc455-81c3-4f96-91ef-2b45ff0017d2"). InnerVolumeSpecName "kube-api-access-trtdq". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.352793 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aaacc455-81c3-4f96-91ef-2b45ff0017d2-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.352840 4751 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aaacc455-81c3-4f96-91ef-2b45ff0017d2-client-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.352860 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-trtdq\" (UniqueName: \"kubernetes.io/projected/aaacc455-81c3-4f96-91ef-2b45ff0017d2-kube-api-access-trtdq\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.352873 4751 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/aaacc455-81c3-4f96-91ef-2b45ff0017d2-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.352885 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aaacc455-81c3-4f96-91ef-2b45ff0017d2-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.652845 4751 generic.go:334] "Generic (PLEG): container finished" podID="b48f83f9-1144-4354-9150-e425bda781bc" containerID="724468fedc84bce446b4f7075ccbe9a7751b7c3fcf0020eafb72eda9a3ecc0e9" exitCode=0 Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.652952 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" event={"ID":"b48f83f9-1144-4354-9150-e425bda781bc","Type":"ContainerDied","Data":"724468fedc84bce446b4f7075ccbe9a7751b7c3fcf0020eafb72eda9a3ecc0e9"} Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.654505 4751 generic.go:334] "Generic (PLEG): container finished" podID="aaacc455-81c3-4f96-91ef-2b45ff0017d2" containerID="0bcffef6b7f20c61f0e8878834860c9563a42870bf01e74e2d45ddb47c4f39d3" exitCode=0 Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.654529 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" event={"ID":"aaacc455-81c3-4f96-91ef-2b45ff0017d2","Type":"ContainerDied","Data":"0bcffef6b7f20c61f0e8878834860c9563a42870bf01e74e2d45ddb47c4f39d3"} Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.654543 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" event={"ID":"aaacc455-81c3-4f96-91ef-2b45ff0017d2","Type":"ContainerDied","Data":"c83a36615d73d21ba0a84ebbae594e97ddd0d9019302ee0ef74a8a3e8f0d4237"} Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.654559 4751 scope.go:117] "RemoveContainer" containerID="0bcffef6b7f20c61f0e8878834860c9563a42870bf01e74e2d45ddb47c4f39d3" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.654741 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.677071 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6698df76c9-69fmx"] Feb 27 16:30:20 crc kubenswrapper[4751]: E0227 16:30:20.677323 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aaacc455-81c3-4f96-91ef-2b45ff0017d2" containerName="controller-manager" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.677336 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="aaacc455-81c3-4f96-91ef-2b45ff0017d2" containerName="controller-manager" Feb 27 16:30:20 crc kubenswrapper[4751]: E0227 16:30:20.677346 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00cac46b-1c35-43d3-82ba-777e5ebb11c4" containerName="oc" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.677353 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="00cac46b-1c35-43d3-82ba-777e5ebb11c4" containerName="oc" Feb 27 16:30:20 crc kubenswrapper[4751]: E0227 16:30:20.677367 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="193bf22c-1f33-4d04-8688-f67aef4fc77e" containerName="collect-profiles" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.677373 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="193bf22c-1f33-4d04-8688-f67aef4fc77e" containerName="collect-profiles" Feb 27 16:30:20 crc kubenswrapper[4751]: E0227 16:30:20.677386 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.677392 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.677499 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="193bf22c-1f33-4d04-8688-f67aef4fc77e" containerName="collect-profiles" Feb 27 16:30:20 crc kubenswrapper[4751]: 
I0227 16:30:20.677508 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="aaacc455-81c3-4f96-91ef-2b45ff0017d2" containerName="controller-manager" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.677520 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.677532 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="00cac46b-1c35-43d3-82ba-777e5ebb11c4" containerName="oc" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.677892 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.680480 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.680693 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.681262 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.681290 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.681447 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.681675 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.682608 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck"] Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.683338 4751 scope.go:117] "RemoveContainer" containerID="0bcffef6b7f20c61f0e8878834860c9563a42870bf01e74e2d45ddb47c4f39d3" Feb 27 16:30:20 crc kubenswrapper[4751]: E0227 16:30:20.684034 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0bcffef6b7f20c61f0e8878834860c9563a42870bf01e74e2d45ddb47c4f39d3\": container with ID starting with 0bcffef6b7f20c61f0e8878834860c9563a42870bf01e74e2d45ddb47c4f39d3 not found: ID does not exist" containerID="0bcffef6b7f20c61f0e8878834860c9563a42870bf01e74e2d45ddb47c4f39d3" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.684072 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0bcffef6b7f20c61f0e8878834860c9563a42870bf01e74e2d45ddb47c4f39d3"} err="failed to get container status \"0bcffef6b7f20c61f0e8878834860c9563a42870bf01e74e2d45ddb47c4f39d3\": rpc error: code = NotFound desc = could not find container \"0bcffef6b7f20c61f0e8878834860c9563a42870bf01e74e2d45ddb47c4f39d3\": container with ID starting with 0bcffef6b7f20c61f0e8878834860c9563a42870bf01e74e2d45ddb47c4f39d3 not found: ID does not exist" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.686853 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.691632 4751 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-6d9fcf5cbd-mzpck"] Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.697213 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6698df76c9-69fmx"] Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.854143 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.860013 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f0324491-719a-4729-bd50-a4f7a94360c5-serving-cert\") pod \"controller-manager-6698df76c9-69fmx\" (UID: \"f0324491-719a-4729-bd50-a4f7a94360c5\") " pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.860266 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f0324491-719a-4729-bd50-a4f7a94360c5-client-ca\") pod \"controller-manager-6698df76c9-69fmx\" (UID: \"f0324491-719a-4729-bd50-a4f7a94360c5\") " pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.860380 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f0324491-719a-4729-bd50-a4f7a94360c5-proxy-ca-bundles\") pod \"controller-manager-6698df76c9-69fmx\" (UID: \"f0324491-719a-4729-bd50-a4f7a94360c5\") " pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.860423 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqfck\" (UniqueName: \"kubernetes.io/projected/f0324491-719a-4729-bd50-a4f7a94360c5-kube-api-access-cqfck\") pod \"controller-manager-6698df76c9-69fmx\" (UID: \"f0324491-719a-4729-bd50-a4f7a94360c5\") " pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.860453 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0324491-719a-4729-bd50-a4f7a94360c5-config\") pod \"controller-manager-6698df76c9-69fmx\" (UID: \"f0324491-719a-4729-bd50-a4f7a94360c5\") " pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.960854 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-grw5j\" (UniqueName: \"kubernetes.io/projected/b48f83f9-1144-4354-9150-e425bda781bc-kube-api-access-grw5j\") pod \"b48f83f9-1144-4354-9150-e425bda781bc\" (UID: \"b48f83f9-1144-4354-9150-e425bda781bc\") " Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.960964 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b48f83f9-1144-4354-9150-e425bda781bc-client-ca\") pod \"b48f83f9-1144-4354-9150-e425bda781bc\" (UID: \"b48f83f9-1144-4354-9150-e425bda781bc\") " Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.961056 4751 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b48f83f9-1144-4354-9150-e425bda781bc-config\") pod \"b48f83f9-1144-4354-9150-e425bda781bc\" (UID: \"b48f83f9-1144-4354-9150-e425bda781bc\") " Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.961088 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b48f83f9-1144-4354-9150-e425bda781bc-serving-cert\") pod \"b48f83f9-1144-4354-9150-e425bda781bc\" (UID: \"b48f83f9-1144-4354-9150-e425bda781bc\") " Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.961277 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f0324491-719a-4729-bd50-a4f7a94360c5-client-ca\") pod \"controller-manager-6698df76c9-69fmx\" (UID: \"f0324491-719a-4729-bd50-a4f7a94360c5\") " pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.961322 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f0324491-719a-4729-bd50-a4f7a94360c5-proxy-ca-bundles\") pod \"controller-manager-6698df76c9-69fmx\" (UID: \"f0324491-719a-4729-bd50-a4f7a94360c5\") " pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.961351 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqfck\" (UniqueName: \"kubernetes.io/projected/f0324491-719a-4729-bd50-a4f7a94360c5-kube-api-access-cqfck\") pod \"controller-manager-6698df76c9-69fmx\" (UID: \"f0324491-719a-4729-bd50-a4f7a94360c5\") " pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.961377 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0324491-719a-4729-bd50-a4f7a94360c5-config\") pod \"controller-manager-6698df76c9-69fmx\" (UID: \"f0324491-719a-4729-bd50-a4f7a94360c5\") " pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.961446 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f0324491-719a-4729-bd50-a4f7a94360c5-serving-cert\") pod \"controller-manager-6698df76c9-69fmx\" (UID: \"f0324491-719a-4729-bd50-a4f7a94360c5\") " pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.962323 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b48f83f9-1144-4354-9150-e425bda781bc-client-ca" (OuterVolumeSpecName: "client-ca") pod "b48f83f9-1144-4354-9150-e425bda781bc" (UID: "b48f83f9-1144-4354-9150-e425bda781bc"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.962758 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b48f83f9-1144-4354-9150-e425bda781bc-config" (OuterVolumeSpecName: "config") pod "b48f83f9-1144-4354-9150-e425bda781bc" (UID: "b48f83f9-1144-4354-9150-e425bda781bc"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.964067 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f0324491-719a-4729-bd50-a4f7a94360c5-client-ca\") pod \"controller-manager-6698df76c9-69fmx\" (UID: \"f0324491-719a-4729-bd50-a4f7a94360c5\") " pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.964433 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0324491-719a-4729-bd50-a4f7a94360c5-config\") pod \"controller-manager-6698df76c9-69fmx\" (UID: \"f0324491-719a-4729-bd50-a4f7a94360c5\") " pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.964461 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f0324491-719a-4729-bd50-a4f7a94360c5-proxy-ca-bundles\") pod \"controller-manager-6698df76c9-69fmx\" (UID: \"f0324491-719a-4729-bd50-a4f7a94360c5\") " pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.965472 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f0324491-719a-4729-bd50-a4f7a94360c5-serving-cert\") pod \"controller-manager-6698df76c9-69fmx\" (UID: \"f0324491-719a-4729-bd50-a4f7a94360c5\") " pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.966775 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b48f83f9-1144-4354-9150-e425bda781bc-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "b48f83f9-1144-4354-9150-e425bda781bc" (UID: "b48f83f9-1144-4354-9150-e425bda781bc"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.974583 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b48f83f9-1144-4354-9150-e425bda781bc-kube-api-access-grw5j" (OuterVolumeSpecName: "kube-api-access-grw5j") pod "b48f83f9-1144-4354-9150-e425bda781bc" (UID: "b48f83f9-1144-4354-9150-e425bda781bc"). InnerVolumeSpecName "kube-api-access-grw5j". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.978195 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqfck\" (UniqueName: \"kubernetes.io/projected/f0324491-719a-4729-bd50-a4f7a94360c5-kube-api-access-cqfck\") pod \"controller-manager-6698df76c9-69fmx\" (UID: \"f0324491-719a-4729-bd50-a4f7a94360c5\") " pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" Feb 27 16:30:20 crc kubenswrapper[4751]: I0227 16:30:20.993964 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.062926 4751 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b48f83f9-1144-4354-9150-e425bda781bc-client-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.062983 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b48f83f9-1144-4354-9150-e425bda781bc-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.063191 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b48f83f9-1144-4354-9150-e425bda781bc-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.063241 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-grw5j\" (UniqueName: \"kubernetes.io/projected/b48f83f9-1144-4354-9150-e425bda781bc-kube-api-access-grw5j\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.406531 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6698df76c9-69fmx"] Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.662001 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" event={"ID":"f0324491-719a-4729-bd50-a4f7a94360c5","Type":"ContainerStarted","Data":"8ace42f01df9687c1884ff16d961c2e4e2e41a7594dfc360edd52c715261cd54"} Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.675004 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84457d6469-hvj8m"] Feb 27 16:30:21 crc kubenswrapper[4751]: E0227 16:30:21.675484 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b48f83f9-1144-4354-9150-e425bda781bc" containerName="route-controller-manager" Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.675501 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b48f83f9-1144-4354-9150-e425bda781bc" containerName="route-controller-manager" Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.675725 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="b48f83f9-1144-4354-9150-e425bda781bc" containerName="route-controller-manager" Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.676230 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-84457d6469-hvj8m" Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.676618 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" event={"ID":"b48f83f9-1144-4354-9150-e425bda781bc","Type":"ContainerDied","Data":"97641da05dad6914f2bdd81b0b9a11a08cbac7cff1e5a1429b07143e081a3198"} Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.676709 4751 scope.go:117] "RemoveContainer" containerID="724468fedc84bce446b4f7075ccbe9a7751b7c3fcf0020eafb72eda9a3ecc0e9" Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.676825 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb" Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.734969 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb"] Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.753722 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-f94c788f6-95jxb"] Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.762015 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84457d6469-hvj8m"] Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.771889 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00e62d10-3c25-4e57-ad35-026780366b73-config\") pod \"route-controller-manager-84457d6469-hvj8m\" (UID: \"00e62d10-3c25-4e57-ad35-026780366b73\") " pod="openshift-route-controller-manager/route-controller-manager-84457d6469-hvj8m" Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.771951 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/00e62d10-3c25-4e57-ad35-026780366b73-client-ca\") pod \"route-controller-manager-84457d6469-hvj8m\" (UID: \"00e62d10-3c25-4e57-ad35-026780366b73\") " pod="openshift-route-controller-manager/route-controller-manager-84457d6469-hvj8m" Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.771978 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00e62d10-3c25-4e57-ad35-026780366b73-serving-cert\") pod \"route-controller-manager-84457d6469-hvj8m\" (UID: \"00e62d10-3c25-4e57-ad35-026780366b73\") " pod="openshift-route-controller-manager/route-controller-manager-84457d6469-hvj8m" Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.771998 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sn4ww\" (UniqueName: \"kubernetes.io/projected/00e62d10-3c25-4e57-ad35-026780366b73-kube-api-access-sn4ww\") pod \"route-controller-manager-84457d6469-hvj8m\" (UID: \"00e62d10-3c25-4e57-ad35-026780366b73\") " pod="openshift-route-controller-manager/route-controller-manager-84457d6469-hvj8m" Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.775788 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.872471 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/00e62d10-3c25-4e57-ad35-026780366b73-client-ca\") pod \"route-controller-manager-84457d6469-hvj8m\" (UID: \"00e62d10-3c25-4e57-ad35-026780366b73\") " pod="openshift-route-controller-manager/route-controller-manager-84457d6469-hvj8m" Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.872532 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00e62d10-3c25-4e57-ad35-026780366b73-serving-cert\") pod \"route-controller-manager-84457d6469-hvj8m\" (UID: \"00e62d10-3c25-4e57-ad35-026780366b73\") " 
pod="openshift-route-controller-manager/route-controller-manager-84457d6469-hvj8m" Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.872566 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sn4ww\" (UniqueName: \"kubernetes.io/projected/00e62d10-3c25-4e57-ad35-026780366b73-kube-api-access-sn4ww\") pod \"route-controller-manager-84457d6469-hvj8m\" (UID: \"00e62d10-3c25-4e57-ad35-026780366b73\") " pod="openshift-route-controller-manager/route-controller-manager-84457d6469-hvj8m" Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.872666 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00e62d10-3c25-4e57-ad35-026780366b73-config\") pod \"route-controller-manager-84457d6469-hvj8m\" (UID: \"00e62d10-3c25-4e57-ad35-026780366b73\") " pod="openshift-route-controller-manager/route-controller-manager-84457d6469-hvj8m" Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.879611 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/00e62d10-3c25-4e57-ad35-026780366b73-client-ca\") pod \"route-controller-manager-84457d6469-hvj8m\" (UID: \"00e62d10-3c25-4e57-ad35-026780366b73\") " pod="openshift-route-controller-manager/route-controller-manager-84457d6469-hvj8m" Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.881283 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00e62d10-3c25-4e57-ad35-026780366b73-config\") pod \"route-controller-manager-84457d6469-hvj8m\" (UID: \"00e62d10-3c25-4e57-ad35-026780366b73\") " pod="openshift-route-controller-manager/route-controller-manager-84457d6469-hvj8m" Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.896884 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00e62d10-3c25-4e57-ad35-026780366b73-serving-cert\") pod \"route-controller-manager-84457d6469-hvj8m\" (UID: \"00e62d10-3c25-4e57-ad35-026780366b73\") " pod="openshift-route-controller-manager/route-controller-manager-84457d6469-hvj8m" Feb 27 16:30:21 crc kubenswrapper[4751]: I0227 16:30:21.903450 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sn4ww\" (UniqueName: \"kubernetes.io/projected/00e62d10-3c25-4e57-ad35-026780366b73-kube-api-access-sn4ww\") pod \"route-controller-manager-84457d6469-hvj8m\" (UID: \"00e62d10-3c25-4e57-ad35-026780366b73\") " pod="openshift-route-controller-manager/route-controller-manager-84457d6469-hvj8m" Feb 27 16:30:22 crc kubenswrapper[4751]: I0227 16:30:22.000021 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-84457d6469-hvj8m" Feb 27 16:30:22 crc kubenswrapper[4751]: I0227 16:30:22.413492 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84457d6469-hvj8m"] Feb 27 16:30:22 crc kubenswrapper[4751]: W0227 16:30:22.418078 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod00e62d10_3c25_4e57_ad35_026780366b73.slice/crio-8a433c89a73144e53258ec13668fb5f8fbb1f89b0a0b59d499c5adde55cd8d75 WatchSource:0}: Error finding container 8a433c89a73144e53258ec13668fb5f8fbb1f89b0a0b59d499c5adde55cd8d75: Status 404 returned error can't find the container with id 8a433c89a73144e53258ec13668fb5f8fbb1f89b0a0b59d499c5adde55cd8d75 Feb 27 16:30:22 crc kubenswrapper[4751]: I0227 16:30:22.527054 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aaacc455-81c3-4f96-91ef-2b45ff0017d2" path="/var/lib/kubelet/pods/aaacc455-81c3-4f96-91ef-2b45ff0017d2/volumes" Feb 27 16:30:22 crc kubenswrapper[4751]: I0227 16:30:22.528121 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b48f83f9-1144-4354-9150-e425bda781bc" path="/var/lib/kubelet/pods/b48f83f9-1144-4354-9150-e425bda781bc/volumes" Feb 27 16:30:22 crc kubenswrapper[4751]: I0227 16:30:22.685046 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-84457d6469-hvj8m" event={"ID":"00e62d10-3c25-4e57-ad35-026780366b73","Type":"ContainerStarted","Data":"8a433c89a73144e53258ec13668fb5f8fbb1f89b0a0b59d499c5adde55cd8d75"} Feb 27 16:30:22 crc kubenswrapper[4751]: I0227 16:30:22.686967 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" event={"ID":"f0324491-719a-4729-bd50-a4f7a94360c5","Type":"ContainerStarted","Data":"b3b25bff5cadf515b378520c349ad87ec99465ee06e70d30ab32b357543ace1f"} Feb 27 16:30:22 crc kubenswrapper[4751]: I0227 16:30:22.687297 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" Feb 27 16:30:22 crc kubenswrapper[4751]: I0227 16:30:22.693242 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" Feb 27 16:30:22 crc kubenswrapper[4751]: I0227 16:30:22.737839 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" podStartSLOduration=3.737787269 podStartE2EDuration="3.737787269s" podCreationTimestamp="2026-02-27 16:30:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:30:22.7083765 +0000 UTC m=+384.855390967" watchObservedRunningTime="2026-02-27 16:30:22.737787269 +0000 UTC m=+384.884801716" Feb 27 16:30:23 crc kubenswrapper[4751]: I0227 16:30:23.693513 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-84457d6469-hvj8m" event={"ID":"00e62d10-3c25-4e57-ad35-026780366b73","Type":"ContainerStarted","Data":"673df84150a9ea7e0e88bf4fec493ab434765ca3c0faa0221fe7d0de2b6e988a"} Feb 27 16:30:24 crc kubenswrapper[4751]: I0227 16:30:24.553783 4751 reflector.go:368] Caches populated for *v1.RuntimeClass from 
k8s.io/client-go/informers/factory.go:160 Feb 27 16:30:24 crc kubenswrapper[4751]: I0227 16:30:24.698358 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-84457d6469-hvj8m" Feb 27 16:30:24 crc kubenswrapper[4751]: I0227 16:30:24.703369 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-84457d6469-hvj8m" Feb 27 16:30:24 crc kubenswrapper[4751]: I0227 16:30:24.726724 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-84457d6469-hvj8m" podStartSLOduration=5.726701705 podStartE2EDuration="5.726701705s" podCreationTimestamp="2026-02-27 16:30:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:30:24.723234691 +0000 UTC m=+386.870249138" watchObservedRunningTime="2026-02-27 16:30:24.726701705 +0000 UTC m=+386.873716152" Feb 27 16:30:26 crc kubenswrapper[4751]: I0227 16:30:26.607533 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Feb 27 16:30:32 crc kubenswrapper[4751]: I0227 16:30:32.497160 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Feb 27 16:30:34 crc kubenswrapper[4751]: I0227 16:30:34.645370 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-k5lsc"] Feb 27 16:30:34 crc kubenswrapper[4751]: I0227 16:30:34.645669 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-k5lsc" podUID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" containerName="registry-server" containerID="cri-o://e22e04a623eed6242f3189bafc9d4d61b2f9cd8403831504738cee145389ed67" gracePeriod=2 Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.301366 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.682784 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-k5lsc" Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.697147 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.721046 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d42be40-69b6-49a3-a4ad-ff74df0c284e-catalog-content\") pod \"2d42be40-69b6-49a3-a4ad-ff74df0c284e\" (UID: \"2d42be40-69b6-49a3-a4ad-ff74df0c284e\") " Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.748645 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2d42be40-69b6-49a3-a4ad-ff74df0c284e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2d42be40-69b6-49a3-a4ad-ff74df0c284e" (UID: "2d42be40-69b6-49a3-a4ad-ff74df0c284e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.791604 4751 generic.go:334] "Generic (PLEG): container finished" podID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" containerID="e22e04a623eed6242f3189bafc9d4d61b2f9cd8403831504738cee145389ed67" exitCode=0 Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.791667 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k5lsc" event={"ID":"2d42be40-69b6-49a3-a4ad-ff74df0c284e","Type":"ContainerDied","Data":"e22e04a623eed6242f3189bafc9d4d61b2f9cd8403831504738cee145389ed67"} Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.791716 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-k5lsc" Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.791723 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k5lsc" event={"ID":"2d42be40-69b6-49a3-a4ad-ff74df0c284e","Type":"ContainerDied","Data":"78f8a4f08efc560bb6601710ac7dbb46305617b843ba325b6d453438372dbf3f"} Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.791748 4751 scope.go:117] "RemoveContainer" containerID="e22e04a623eed6242f3189bafc9d4d61b2f9cd8403831504738cee145389ed67" Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.814506 4751 scope.go:117] "RemoveContainer" containerID="b4a6fd7061983142d32f2451819c9fbd271ccdc11f4fba5b17692ce6551efc8f" Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.823612 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gkrts\" (UniqueName: \"kubernetes.io/projected/2d42be40-69b6-49a3-a4ad-ff74df0c284e-kube-api-access-gkrts\") pod \"2d42be40-69b6-49a3-a4ad-ff74df0c284e\" (UID: \"2d42be40-69b6-49a3-a4ad-ff74df0c284e\") " Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.823673 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d42be40-69b6-49a3-a4ad-ff74df0c284e-utilities\") pod \"2d42be40-69b6-49a3-a4ad-ff74df0c284e\" (UID: \"2d42be40-69b6-49a3-a4ad-ff74df0c284e\") " Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.823868 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d42be40-69b6-49a3-a4ad-ff74df0c284e-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.825213 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2d42be40-69b6-49a3-a4ad-ff74df0c284e-utilities" (OuterVolumeSpecName: "utilities") pod "2d42be40-69b6-49a3-a4ad-ff74df0c284e" (UID: "2d42be40-69b6-49a3-a4ad-ff74df0c284e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.834579 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d42be40-69b6-49a3-a4ad-ff74df0c284e-kube-api-access-gkrts" (OuterVolumeSpecName: "kube-api-access-gkrts") pod "2d42be40-69b6-49a3-a4ad-ff74df0c284e" (UID: "2d42be40-69b6-49a3-a4ad-ff74df0c284e"). InnerVolumeSpecName "kube-api-access-gkrts". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.843821 4751 scope.go:117] "RemoveContainer" containerID="fee53ede9d76ef27573e797f0c26a17a20d334e8428a5b4704e8a97eacbffd9e" Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.883496 4751 scope.go:117] "RemoveContainer" containerID="e22e04a623eed6242f3189bafc9d4d61b2f9cd8403831504738cee145389ed67" Feb 27 16:30:35 crc kubenswrapper[4751]: E0227 16:30:35.884290 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e22e04a623eed6242f3189bafc9d4d61b2f9cd8403831504738cee145389ed67\": container with ID starting with e22e04a623eed6242f3189bafc9d4d61b2f9cd8403831504738cee145389ed67 not found: ID does not exist" containerID="e22e04a623eed6242f3189bafc9d4d61b2f9cd8403831504738cee145389ed67" Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.884348 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e22e04a623eed6242f3189bafc9d4d61b2f9cd8403831504738cee145389ed67"} err="failed to get container status \"e22e04a623eed6242f3189bafc9d4d61b2f9cd8403831504738cee145389ed67\": rpc error: code = NotFound desc = could not find container \"e22e04a623eed6242f3189bafc9d4d61b2f9cd8403831504738cee145389ed67\": container with ID starting with e22e04a623eed6242f3189bafc9d4d61b2f9cd8403831504738cee145389ed67 not found: ID does not exist" Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.884387 4751 scope.go:117] "RemoveContainer" containerID="b4a6fd7061983142d32f2451819c9fbd271ccdc11f4fba5b17692ce6551efc8f" Feb 27 16:30:35 crc kubenswrapper[4751]: E0227 16:30:35.884883 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4a6fd7061983142d32f2451819c9fbd271ccdc11f4fba5b17692ce6551efc8f\": container with ID starting with b4a6fd7061983142d32f2451819c9fbd271ccdc11f4fba5b17692ce6551efc8f not found: ID does not exist" containerID="b4a6fd7061983142d32f2451819c9fbd271ccdc11f4fba5b17692ce6551efc8f" Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.884914 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4a6fd7061983142d32f2451819c9fbd271ccdc11f4fba5b17692ce6551efc8f"} err="failed to get container status \"b4a6fd7061983142d32f2451819c9fbd271ccdc11f4fba5b17692ce6551efc8f\": rpc error: code = NotFound desc = could not find container \"b4a6fd7061983142d32f2451819c9fbd271ccdc11f4fba5b17692ce6551efc8f\": container with ID starting with b4a6fd7061983142d32f2451819c9fbd271ccdc11f4fba5b17692ce6551efc8f not found: ID does not exist" Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.884930 4751 scope.go:117] "RemoveContainer" containerID="fee53ede9d76ef27573e797f0c26a17a20d334e8428a5b4704e8a97eacbffd9e" Feb 27 16:30:35 crc kubenswrapper[4751]: E0227 16:30:35.885568 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fee53ede9d76ef27573e797f0c26a17a20d334e8428a5b4704e8a97eacbffd9e\": container with ID starting with fee53ede9d76ef27573e797f0c26a17a20d334e8428a5b4704e8a97eacbffd9e not found: ID does not exist" containerID="fee53ede9d76ef27573e797f0c26a17a20d334e8428a5b4704e8a97eacbffd9e" Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.885598 4751 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"fee53ede9d76ef27573e797f0c26a17a20d334e8428a5b4704e8a97eacbffd9e"} err="failed to get container status \"fee53ede9d76ef27573e797f0c26a17a20d334e8428a5b4704e8a97eacbffd9e\": rpc error: code = NotFound desc = could not find container \"fee53ede9d76ef27573e797f0c26a17a20d334e8428a5b4704e8a97eacbffd9e\": container with ID starting with fee53ede9d76ef27573e797f0c26a17a20d334e8428a5b4704e8a97eacbffd9e not found: ID does not exist" Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.924935 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d42be40-69b6-49a3-a4ad-ff74df0c284e-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:35 crc kubenswrapper[4751]: I0227 16:30:35.924996 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gkrts\" (UniqueName: \"kubernetes.io/projected/2d42be40-69b6-49a3-a4ad-ff74df0c284e-kube-api-access-gkrts\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:36 crc kubenswrapper[4751]: I0227 16:30:36.126221 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-k5lsc"] Feb 27 16:30:36 crc kubenswrapper[4751]: I0227 16:30:36.132276 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-k5lsc"] Feb 27 16:30:36 crc kubenswrapper[4751]: I0227 16:30:36.528141 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" path="/var/lib/kubelet/pods/2d42be40-69b6-49a3-a4ad-ff74df0c284e/volumes" Feb 27 16:30:39 crc kubenswrapper[4751]: I0227 16:30:39.519576 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6698df76c9-69fmx"] Feb 27 16:30:39 crc kubenswrapper[4751]: I0227 16:30:39.520463 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" podUID="f0324491-719a-4729-bd50-a4f7a94360c5" containerName="controller-manager" containerID="cri-o://b3b25bff5cadf515b378520c349ad87ec99465ee06e70d30ab32b357543ace1f" gracePeriod=30 Feb 27 16:30:40 crc kubenswrapper[4751]: I0227 16:30:40.994850 4751 patch_prober.go:28] interesting pod/controller-manager-6698df76c9-69fmx container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.69:8443/healthz\": dial tcp 10.217.0.69:8443: connect: connection refused" start-of-body= Feb 27 16:30:40 crc kubenswrapper[4751]: I0227 16:30:40.995339 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" podUID="f0324491-719a-4729-bd50-a4f7a94360c5" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.69:8443/healthz\": dial tcp 10.217.0.69:8443: connect: connection refused" Feb 27 16:30:41 crc kubenswrapper[4751]: I0227 16:30:41.412285 4751 generic.go:334] "Generic (PLEG): container finished" podID="f0324491-719a-4729-bd50-a4f7a94360c5" containerID="b3b25bff5cadf515b378520c349ad87ec99465ee06e70d30ab32b357543ace1f" exitCode=0 Feb 27 16:30:41 crc kubenswrapper[4751]: I0227 16:30:41.412326 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" event={"ID":"f0324491-719a-4729-bd50-a4f7a94360c5","Type":"ContainerDied","Data":"b3b25bff5cadf515b378520c349ad87ec99465ee06e70d30ab32b357543ace1f"} Feb 27 16:30:41 crc 
kubenswrapper[4751]: I0227 16:30:41.892993 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" Feb 27 16:30:41 crc kubenswrapper[4751]: I0227 16:30:41.925368 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5"] Feb 27 16:30:41 crc kubenswrapper[4751]: E0227 16:30:41.925649 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" containerName="extract-utilities" Feb 27 16:30:41 crc kubenswrapper[4751]: I0227 16:30:41.925666 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" containerName="extract-utilities" Feb 27 16:30:41 crc kubenswrapper[4751]: E0227 16:30:41.925684 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0324491-719a-4729-bd50-a4f7a94360c5" containerName="controller-manager" Feb 27 16:30:41 crc kubenswrapper[4751]: I0227 16:30:41.925693 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0324491-719a-4729-bd50-a4f7a94360c5" containerName="controller-manager" Feb 27 16:30:41 crc kubenswrapper[4751]: E0227 16:30:41.925707 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" containerName="extract-content" Feb 27 16:30:41 crc kubenswrapper[4751]: I0227 16:30:41.925716 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" containerName="extract-content" Feb 27 16:30:41 crc kubenswrapper[4751]: E0227 16:30:41.925725 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" containerName="registry-server" Feb 27 16:30:41 crc kubenswrapper[4751]: I0227 16:30:41.925733 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" containerName="registry-server" Feb 27 16:30:41 crc kubenswrapper[4751]: I0227 16:30:41.925886 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d42be40-69b6-49a3-a4ad-ff74df0c284e" containerName="registry-server" Feb 27 16:30:41 crc kubenswrapper[4751]: I0227 16:30:41.925915 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0324491-719a-4729-bd50-a4f7a94360c5" containerName="controller-manager" Feb 27 16:30:41 crc kubenswrapper[4751]: I0227 16:30:41.926495 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" Feb 27 16:30:41 crc kubenswrapper[4751]: I0227 16:30:41.944610 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5"] Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.007000 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f0324491-719a-4729-bd50-a4f7a94360c5-client-ca\") pod \"f0324491-719a-4729-bd50-a4f7a94360c5\" (UID: \"f0324491-719a-4729-bd50-a4f7a94360c5\") " Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.007084 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0324491-719a-4729-bd50-a4f7a94360c5-config\") pod \"f0324491-719a-4729-bd50-a4f7a94360c5\" (UID: \"f0324491-719a-4729-bd50-a4f7a94360c5\") " Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.007355 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f0324491-719a-4729-bd50-a4f7a94360c5-proxy-ca-bundles\") pod \"f0324491-719a-4729-bd50-a4f7a94360c5\" (UID: \"f0324491-719a-4729-bd50-a4f7a94360c5\") " Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.007460 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cqfck\" (UniqueName: \"kubernetes.io/projected/f0324491-719a-4729-bd50-a4f7a94360c5-kube-api-access-cqfck\") pod \"f0324491-719a-4729-bd50-a4f7a94360c5\" (UID: \"f0324491-719a-4729-bd50-a4f7a94360c5\") " Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.007500 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f0324491-719a-4729-bd50-a4f7a94360c5-serving-cert\") pod \"f0324491-719a-4729-bd50-a4f7a94360c5\" (UID: \"f0324491-719a-4729-bd50-a4f7a94360c5\") " Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.007680 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glc4k\" (UniqueName: \"kubernetes.io/projected/a44db268-8fca-4c87-ac3b-137c6d81dae2-kube-api-access-glc4k\") pod \"controller-manager-7dbf64f7d4-mzll5\" (UID: \"a44db268-8fca-4c87-ac3b-137c6d81dae2\") " pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.007725 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a44db268-8fca-4c87-ac3b-137c6d81dae2-client-ca\") pod \"controller-manager-7dbf64f7d4-mzll5\" (UID: \"a44db268-8fca-4c87-ac3b-137c6d81dae2\") " pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.007843 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a44db268-8fca-4c87-ac3b-137c6d81dae2-proxy-ca-bundles\") pod \"controller-manager-7dbf64f7d4-mzll5\" (UID: \"a44db268-8fca-4c87-ac3b-137c6d81dae2\") " pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.007907 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/a44db268-8fca-4c87-ac3b-137c6d81dae2-config\") pod \"controller-manager-7dbf64f7d4-mzll5\" (UID: \"a44db268-8fca-4c87-ac3b-137c6d81dae2\") " pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.008054 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0324491-719a-4729-bd50-a4f7a94360c5-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "f0324491-719a-4729-bd50-a4f7a94360c5" (UID: "f0324491-719a-4729-bd50-a4f7a94360c5"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.008075 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a44db268-8fca-4c87-ac3b-137c6d81dae2-serving-cert\") pod \"controller-manager-7dbf64f7d4-mzll5\" (UID: \"a44db268-8fca-4c87-ac3b-137c6d81dae2\") " pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.008166 4751 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f0324491-719a-4729-bd50-a4f7a94360c5-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.008256 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0324491-719a-4729-bd50-a4f7a94360c5-client-ca" (OuterVolumeSpecName: "client-ca") pod "f0324491-719a-4729-bd50-a4f7a94360c5" (UID: "f0324491-719a-4729-bd50-a4f7a94360c5"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.008753 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0324491-719a-4729-bd50-a4f7a94360c5-config" (OuterVolumeSpecName: "config") pod "f0324491-719a-4729-bd50-a4f7a94360c5" (UID: "f0324491-719a-4729-bd50-a4f7a94360c5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.014601 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0324491-719a-4729-bd50-a4f7a94360c5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f0324491-719a-4729-bd50-a4f7a94360c5" (UID: "f0324491-719a-4729-bd50-a4f7a94360c5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.014613 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0324491-719a-4729-bd50-a4f7a94360c5-kube-api-access-cqfck" (OuterVolumeSpecName: "kube-api-access-cqfck") pod "f0324491-719a-4729-bd50-a4f7a94360c5" (UID: "f0324491-719a-4729-bd50-a4f7a94360c5"). InnerVolumeSpecName "kube-api-access-cqfck". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.109271 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a44db268-8fca-4c87-ac3b-137c6d81dae2-client-ca\") pod \"controller-manager-7dbf64f7d4-mzll5\" (UID: \"a44db268-8fca-4c87-ac3b-137c6d81dae2\") " pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.110186 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a44db268-8fca-4c87-ac3b-137c6d81dae2-proxy-ca-bundles\") pod \"controller-manager-7dbf64f7d4-mzll5\" (UID: \"a44db268-8fca-4c87-ac3b-137c6d81dae2\") " pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.110306 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a44db268-8fca-4c87-ac3b-137c6d81dae2-config\") pod \"controller-manager-7dbf64f7d4-mzll5\" (UID: \"a44db268-8fca-4c87-ac3b-137c6d81dae2\") " pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.110508 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a44db268-8fca-4c87-ac3b-137c6d81dae2-serving-cert\") pod \"controller-manager-7dbf64f7d4-mzll5\" (UID: \"a44db268-8fca-4c87-ac3b-137c6d81dae2\") " pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.110654 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a44db268-8fca-4c87-ac3b-137c6d81dae2-client-ca\") pod \"controller-manager-7dbf64f7d4-mzll5\" (UID: \"a44db268-8fca-4c87-ac3b-137c6d81dae2\") " pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.110794 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glc4k\" (UniqueName: \"kubernetes.io/projected/a44db268-8fca-4c87-ac3b-137c6d81dae2-kube-api-access-glc4k\") pod \"controller-manager-7dbf64f7d4-mzll5\" (UID: \"a44db268-8fca-4c87-ac3b-137c6d81dae2\") " pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.110930 4751 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f0324491-719a-4729-bd50-a4f7a94360c5-client-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.111009 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0324491-719a-4729-bd50-a4f7a94360c5-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.111087 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cqfck\" (UniqueName: \"kubernetes.io/projected/f0324491-719a-4729-bd50-a4f7a94360c5-kube-api-access-cqfck\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.111184 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/f0324491-719a-4729-bd50-a4f7a94360c5-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.111934 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a44db268-8fca-4c87-ac3b-137c6d81dae2-config\") pod \"controller-manager-7dbf64f7d4-mzll5\" (UID: \"a44db268-8fca-4c87-ac3b-137c6d81dae2\") " pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.111939 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a44db268-8fca-4c87-ac3b-137c6d81dae2-proxy-ca-bundles\") pod \"controller-manager-7dbf64f7d4-mzll5\" (UID: \"a44db268-8fca-4c87-ac3b-137c6d81dae2\") " pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.114860 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a44db268-8fca-4c87-ac3b-137c6d81dae2-serving-cert\") pod \"controller-manager-7dbf64f7d4-mzll5\" (UID: \"a44db268-8fca-4c87-ac3b-137c6d81dae2\") " pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.133016 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glc4k\" (UniqueName: \"kubernetes.io/projected/a44db268-8fca-4c87-ac3b-137c6d81dae2-kube-api-access-glc4k\") pod \"controller-manager-7dbf64f7d4-mzll5\" (UID: \"a44db268-8fca-4c87-ac3b-137c6d81dae2\") " pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.246914 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.418641 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" event={"ID":"f0324491-719a-4729-bd50-a4f7a94360c5","Type":"ContainerDied","Data":"8ace42f01df9687c1884ff16d961c2e4e2e41a7594dfc360edd52c715261cd54"} Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.418690 4751 scope.go:117] "RemoveContainer" containerID="b3b25bff5cadf515b378520c349ad87ec99465ee06e70d30ab32b357543ace1f" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.418701 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6698df76c9-69fmx" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.450077 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6698df76c9-69fmx"] Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.455585 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-6698df76c9-69fmx"] Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.528232 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f0324491-719a-4729-bd50-a4f7a94360c5" path="/var/lib/kubelet/pods/f0324491-719a-4729-bd50-a4f7a94360c5/volumes" Feb 27 16:30:42 crc kubenswrapper[4751]: I0227 16:30:42.744805 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5"] Feb 27 16:30:43 crc kubenswrapper[4751]: I0227 16:30:43.186781 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Feb 27 16:30:43 crc kubenswrapper[4751]: I0227 16:30:43.425826 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" event={"ID":"a44db268-8fca-4c87-ac3b-137c6d81dae2","Type":"ContainerStarted","Data":"7f2d584a79402d0c854900a8ecc41c19c7c66b8361901cab7fd5f828f4d48c1f"} Feb 27 16:30:43 crc kubenswrapper[4751]: I0227 16:30:43.425868 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" event={"ID":"a44db268-8fca-4c87-ac3b-137c6d81dae2","Type":"ContainerStarted","Data":"74c9ec08f875ff8b04ca9aef85201b61911247d0c0924056921547d87dcb4670"} Feb 27 16:30:43 crc kubenswrapper[4751]: I0227 16:30:43.426017 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" Feb 27 16:30:43 crc kubenswrapper[4751]: I0227 16:30:43.431880 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" Feb 27 16:30:43 crc kubenswrapper[4751]: I0227 16:30:43.442438 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" podStartSLOduration=4.442425108 podStartE2EDuration="4.442425108s" podCreationTimestamp="2026-02-27 16:30:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:30:43.441435752 +0000 UTC m=+405.588450199" watchObservedRunningTime="2026-02-27 16:30:43.442425108 +0000 UTC m=+405.589439545" Feb 27 16:30:44 crc kubenswrapper[4751]: I0227 16:30:44.070704 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Feb 27 16:30:45 crc kubenswrapper[4751]: I0227 16:30:45.156580 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Feb 27 16:31:28 crc kubenswrapper[4751]: I0227 16:31:28.917856 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 16:31:28 crc kubenswrapper[4751]: I0227 
16:31:28.918446 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:31:39 crc kubenswrapper[4751]: I0227 16:31:39.528272 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5"] Feb 27 16:31:39 crc kubenswrapper[4751]: I0227 16:31:39.535859 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" podUID="a44db268-8fca-4c87-ac3b-137c6d81dae2" containerName="controller-manager" containerID="cri-o://7f2d584a79402d0c854900a8ecc41c19c7c66b8361901cab7fd5f828f4d48c1f" gracePeriod=30 Feb 27 16:31:39 crc kubenswrapper[4751]: I0227 16:31:39.842563 4751 generic.go:334] "Generic (PLEG): container finished" podID="a44db268-8fca-4c87-ac3b-137c6d81dae2" containerID="7f2d584a79402d0c854900a8ecc41c19c7c66b8361901cab7fd5f828f4d48c1f" exitCode=0 Feb 27 16:31:39 crc kubenswrapper[4751]: I0227 16:31:39.843294 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" event={"ID":"a44db268-8fca-4c87-ac3b-137c6d81dae2","Type":"ContainerDied","Data":"7f2d584a79402d0c854900a8ecc41c19c7c66b8361901cab7fd5f828f4d48c1f"} Feb 27 16:31:39 crc kubenswrapper[4751]: I0227 16:31:39.944949 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.113876 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a44db268-8fca-4c87-ac3b-137c6d81dae2-config\") pod \"a44db268-8fca-4c87-ac3b-137c6d81dae2\" (UID: \"a44db268-8fca-4c87-ac3b-137c6d81dae2\") " Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.113981 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a44db268-8fca-4c87-ac3b-137c6d81dae2-proxy-ca-bundles\") pod \"a44db268-8fca-4c87-ac3b-137c6d81dae2\" (UID: \"a44db268-8fca-4c87-ac3b-137c6d81dae2\") " Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.114114 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a44db268-8fca-4c87-ac3b-137c6d81dae2-serving-cert\") pod \"a44db268-8fca-4c87-ac3b-137c6d81dae2\" (UID: \"a44db268-8fca-4c87-ac3b-137c6d81dae2\") " Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.114145 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a44db268-8fca-4c87-ac3b-137c6d81dae2-client-ca\") pod \"a44db268-8fca-4c87-ac3b-137c6d81dae2\" (UID: \"a44db268-8fca-4c87-ac3b-137c6d81dae2\") " Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.114200 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-glc4k\" (UniqueName: \"kubernetes.io/projected/a44db268-8fca-4c87-ac3b-137c6d81dae2-kube-api-access-glc4k\") pod \"a44db268-8fca-4c87-ac3b-137c6d81dae2\" (UID: \"a44db268-8fca-4c87-ac3b-137c6d81dae2\") " Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 
16:31:40.114954 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a44db268-8fca-4c87-ac3b-137c6d81dae2-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "a44db268-8fca-4c87-ac3b-137c6d81dae2" (UID: "a44db268-8fca-4c87-ac3b-137c6d81dae2"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.114983 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a44db268-8fca-4c87-ac3b-137c6d81dae2-config" (OuterVolumeSpecName: "config") pod "a44db268-8fca-4c87-ac3b-137c6d81dae2" (UID: "a44db268-8fca-4c87-ac3b-137c6d81dae2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.115177 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a44db268-8fca-4c87-ac3b-137c6d81dae2-client-ca" (OuterVolumeSpecName: "client-ca") pod "a44db268-8fca-4c87-ac3b-137c6d81dae2" (UID: "a44db268-8fca-4c87-ac3b-137c6d81dae2"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.119912 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a44db268-8fca-4c87-ac3b-137c6d81dae2-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "a44db268-8fca-4c87-ac3b-137c6d81dae2" (UID: "a44db268-8fca-4c87-ac3b-137c6d81dae2"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.122463 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a44db268-8fca-4c87-ac3b-137c6d81dae2-kube-api-access-glc4k" (OuterVolumeSpecName: "kube-api-access-glc4k") pod "a44db268-8fca-4c87-ac3b-137c6d81dae2" (UID: "a44db268-8fca-4c87-ac3b-137c6d81dae2"). InnerVolumeSpecName "kube-api-access-glc4k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.215751 4751 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a44db268-8fca-4c87-ac3b-137c6d81dae2-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.215800 4751 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a44db268-8fca-4c87-ac3b-137c6d81dae2-client-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.215811 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-glc4k\" (UniqueName: \"kubernetes.io/projected/a44db268-8fca-4c87-ac3b-137c6d81dae2-kube-api-access-glc4k\") on node \"crc\" DevicePath \"\"" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.215826 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a44db268-8fca-4c87-ac3b-137c6d81dae2-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.215836 4751 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a44db268-8fca-4c87-ac3b-137c6d81dae2-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.452078 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ksw46"] Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.452703 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ksw46" podUID="1c35558f-cd8a-4a04-baca-ea445d76b712" containerName="registry-server" containerID="cri-o://1f171047e55a972a58277c75a89d5a5c01ed580dd49df219e0ddf72536ee15e2" gracePeriod=30 Feb 27 16:31:40 crc kubenswrapper[4751]: E0227 16:31:40.454906 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1f171047e55a972a58277c75a89d5a5c01ed580dd49df219e0ddf72536ee15e2" cmd=["grpc_health_probe","-addr=:50051"] Feb 27 16:31:40 crc kubenswrapper[4751]: E0227 16:31:40.460775 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1f171047e55a972a58277c75a89d5a5c01ed580dd49df219e0ddf72536ee15e2" cmd=["grpc_health_probe","-addr=:50051"] Feb 27 16:31:40 crc kubenswrapper[4751]: E0227 16:31:40.464505 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1f171047e55a972a58277c75a89d5a5c01ed580dd49df219e0ddf72536ee15e2" cmd=["grpc_health_probe","-addr=:50051"] Feb 27 16:31:40 crc kubenswrapper[4751]: E0227 16:31:40.469635 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1f171047e55a972a58277c75a89d5a5c01ed580dd49df219e0ddf72536ee15e2" cmd=["grpc_health_probe","-addr=:50051"] Feb 27 16:31:40 crc kubenswrapper[4751]: E0227 16:31:40.469725 4751 prober.go:104] "Probe errored" 
err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Liveness" pod="openshift-marketplace/certified-operators-ksw46" podUID="1c35558f-cd8a-4a04-baca-ea445d76b712" containerName="registry-server" Feb 27 16:31:40 crc kubenswrapper[4751]: E0227 16:31:40.469808 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1f171047e55a972a58277c75a89d5a5c01ed580dd49df219e0ddf72536ee15e2" cmd=["grpc_health_probe","-addr=:50051"] Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.472304 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-94pcv"] Feb 27 16:31:40 crc kubenswrapper[4751]: E0227 16:31:40.472916 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1f171047e55a972a58277c75a89d5a5c01ed580dd49df219e0ddf72536ee15e2" cmd=["grpc_health_probe","-addr=:50051"] Feb 27 16:31:40 crc kubenswrapper[4751]: E0227 16:31:40.473082 4751 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-marketplace/certified-operators-ksw46" podUID="1c35558f-cd8a-4a04-baca-ea445d76b712" containerName="registry-server" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.473149 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-94pcv" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" containerName="registry-server" containerID="cri-o://70ef3570bab755daece0f426d5090821cc7ffcfd0b7dc19050930c97ec31c8fc" gracePeriod=30 Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.485983 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mw4mn"] Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.486381 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-mw4mn" podUID="6efd7ce0-9b49-4c51-accf-3efcfc1188e6" containerName="marketplace-operator" containerID="cri-o://ac44ab100ee04292ad4c40faef41bf61ca615c7ced92e5e7c2506100e5c38686" gracePeriod=30 Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.496520 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-n8xtl"] Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.496832 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-n8xtl" podUID="317aef2b-3749-4a30-afc6-96f40516eae7" containerName="registry-server" containerID="cri-o://491e99996e1d9d684128abd09f5e7ce6477de0245c550dd66ee1b301c746f5e9" gracePeriod=30 Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.502604 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r48bt"] Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.503007 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-r48bt" podUID="cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" containerName="registry-server" 
containerID="cri-o://e2e37554a3c5799746ecbb9b307a55d39109492c3ae5401366da7cd7237fc649" gracePeriod=30 Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.515950 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kbkfj"] Feb 27 16:31:40 crc kubenswrapper[4751]: E0227 16:31:40.516219 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a44db268-8fca-4c87-ac3b-137c6d81dae2" containerName="controller-manager" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.516242 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="a44db268-8fca-4c87-ac3b-137c6d81dae2" containerName="controller-manager" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.516368 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="a44db268-8fca-4c87-ac3b-137c6d81dae2" containerName="controller-manager" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.516933 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-kbkfj" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.551600 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kbkfj"] Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.622307 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c88a4f09-3810-4b81-9a96-7158892ac367-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-kbkfj\" (UID: \"c88a4f09-3810-4b81-9a96-7158892ac367\") " pod="openshift-marketplace/marketplace-operator-79b997595-kbkfj" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.622408 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rd74l\" (UniqueName: \"kubernetes.io/projected/c88a4f09-3810-4b81-9a96-7158892ac367-kube-api-access-rd74l\") pod \"marketplace-operator-79b997595-kbkfj\" (UID: \"c88a4f09-3810-4b81-9a96-7158892ac367\") " pod="openshift-marketplace/marketplace-operator-79b997595-kbkfj" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.622603 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c88a4f09-3810-4b81-9a96-7158892ac367-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-kbkfj\" (UID: \"c88a4f09-3810-4b81-9a96-7158892ac367\") " pod="openshift-marketplace/marketplace-operator-79b997595-kbkfj" Feb 27 16:31:40 crc kubenswrapper[4751]: E0227 16:31:40.627699 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 70ef3570bab755daece0f426d5090821cc7ffcfd0b7dc19050930c97ec31c8fc is running failed: container process not found" containerID="70ef3570bab755daece0f426d5090821cc7ffcfd0b7dc19050930c97ec31c8fc" cmd=["grpc_health_probe","-addr=:50051"] Feb 27 16:31:40 crc kubenswrapper[4751]: E0227 16:31:40.628922 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 70ef3570bab755daece0f426d5090821cc7ffcfd0b7dc19050930c97ec31c8fc is running failed: container process not found" containerID="70ef3570bab755daece0f426d5090821cc7ffcfd0b7dc19050930c97ec31c8fc" cmd=["grpc_health_probe","-addr=:50051"] Feb 27 
16:31:40 crc kubenswrapper[4751]: E0227 16:31:40.629225 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 70ef3570bab755daece0f426d5090821cc7ffcfd0b7dc19050930c97ec31c8fc is running failed: container process not found" containerID="70ef3570bab755daece0f426d5090821cc7ffcfd0b7dc19050930c97ec31c8fc" cmd=["grpc_health_probe","-addr=:50051"] Feb 27 16:31:40 crc kubenswrapper[4751]: E0227 16:31:40.629265 4751 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 70ef3570bab755daece0f426d5090821cc7ffcfd0b7dc19050930c97ec31c8fc is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/community-operators-94pcv" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" containerName="registry-server" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.723972 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rd74l\" (UniqueName: \"kubernetes.io/projected/c88a4f09-3810-4b81-9a96-7158892ac367-kube-api-access-rd74l\") pod \"marketplace-operator-79b997595-kbkfj\" (UID: \"c88a4f09-3810-4b81-9a96-7158892ac367\") " pod="openshift-marketplace/marketplace-operator-79b997595-kbkfj" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.724545 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c88a4f09-3810-4b81-9a96-7158892ac367-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-kbkfj\" (UID: \"c88a4f09-3810-4b81-9a96-7158892ac367\") " pod="openshift-marketplace/marketplace-operator-79b997595-kbkfj" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.724793 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c88a4f09-3810-4b81-9a96-7158892ac367-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-kbkfj\" (UID: \"c88a4f09-3810-4b81-9a96-7158892ac367\") " pod="openshift-marketplace/marketplace-operator-79b997595-kbkfj" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.726191 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c88a4f09-3810-4b81-9a96-7158892ac367-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-kbkfj\" (UID: \"c88a4f09-3810-4b81-9a96-7158892ac367\") " pod="openshift-marketplace/marketplace-operator-79b997595-kbkfj" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.729726 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c88a4f09-3810-4b81-9a96-7158892ac367-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-kbkfj\" (UID: \"c88a4f09-3810-4b81-9a96-7158892ac367\") " pod="openshift-marketplace/marketplace-operator-79b997595-kbkfj" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.730788 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6698df76c9-sstcr"] Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.732197 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6698df76c9-sstcr" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.749157 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rd74l\" (UniqueName: \"kubernetes.io/projected/c88a4f09-3810-4b81-9a96-7158892ac367-kube-api-access-rd74l\") pod \"marketplace-operator-79b997595-kbkfj\" (UID: \"c88a4f09-3810-4b81-9a96-7158892ac367\") " pod="openshift-marketplace/marketplace-operator-79b997595-kbkfj" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.763125 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6698df76c9-sstcr"] Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.825489 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4bb730d3-952a-46cd-b8cf-be352483f49b-config\") pod \"controller-manager-6698df76c9-sstcr\" (UID: \"4bb730d3-952a-46cd-b8cf-be352483f49b\") " pod="openshift-controller-manager/controller-manager-6698df76c9-sstcr" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.825576 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5pvsv\" (UniqueName: \"kubernetes.io/projected/4bb730d3-952a-46cd-b8cf-be352483f49b-kube-api-access-5pvsv\") pod \"controller-manager-6698df76c9-sstcr\" (UID: \"4bb730d3-952a-46cd-b8cf-be352483f49b\") " pod="openshift-controller-manager/controller-manager-6698df76c9-sstcr" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.825602 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4bb730d3-952a-46cd-b8cf-be352483f49b-client-ca\") pod \"controller-manager-6698df76c9-sstcr\" (UID: \"4bb730d3-952a-46cd-b8cf-be352483f49b\") " pod="openshift-controller-manager/controller-manager-6698df76c9-sstcr" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.825825 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4bb730d3-952a-46cd-b8cf-be352483f49b-proxy-ca-bundles\") pod \"controller-manager-6698df76c9-sstcr\" (UID: \"4bb730d3-952a-46cd-b8cf-be352483f49b\") " pod="openshift-controller-manager/controller-manager-6698df76c9-sstcr" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.825907 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4bb730d3-952a-46cd-b8cf-be352483f49b-serving-cert\") pod \"controller-manager-6698df76c9-sstcr\" (UID: \"4bb730d3-952a-46cd-b8cf-be352483f49b\") " pod="openshift-controller-manager/controller-manager-6698df76c9-sstcr" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.852055 4751 generic.go:334] "Generic (PLEG): container finished" podID="317aef2b-3749-4a30-afc6-96f40516eae7" containerID="491e99996e1d9d684128abd09f5e7ce6477de0245c550dd66ee1b301c746f5e9" exitCode=0 Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.852792 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n8xtl" event={"ID":"317aef2b-3749-4a30-afc6-96f40516eae7","Type":"ContainerDied","Data":"491e99996e1d9d684128abd09f5e7ce6477de0245c550dd66ee1b301c746f5e9"} Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.868135 4751 
generic.go:334] "Generic (PLEG): container finished" podID="6efd7ce0-9b49-4c51-accf-3efcfc1188e6" containerID="ac44ab100ee04292ad4c40faef41bf61ca615c7ced92e5e7c2506100e5c38686" exitCode=0 Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.868237 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-mw4mn" event={"ID":"6efd7ce0-9b49-4c51-accf-3efcfc1188e6","Type":"ContainerDied","Data":"ac44ab100ee04292ad4c40faef41bf61ca615c7ced92e5e7c2506100e5c38686"} Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.875390 4751 generic.go:334] "Generic (PLEG): container finished" podID="7412acf1-544d-4fbb-a538-2071988c8ae1" containerID="70ef3570bab755daece0f426d5090821cc7ffcfd0b7dc19050930c97ec31c8fc" exitCode=0 Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.875512 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-94pcv" event={"ID":"7412acf1-544d-4fbb-a538-2071988c8ae1","Type":"ContainerDied","Data":"70ef3570bab755daece0f426d5090821cc7ffcfd0b7dc19050930c97ec31c8fc"} Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.880323 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.880355 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5" event={"ID":"a44db268-8fca-4c87-ac3b-137c6d81dae2","Type":"ContainerDied","Data":"74c9ec08f875ff8b04ca9aef85201b61911247d0c0924056921547d87dcb4670"} Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.880461 4751 scope.go:117] "RemoveContainer" containerID="7f2d584a79402d0c854900a8ecc41c19c7c66b8361901cab7fd5f828f4d48c1f" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.904884 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-kbkfj" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.918495 4751 generic.go:334] "Generic (PLEG): container finished" podID="cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" containerID="e2e37554a3c5799746ecbb9b307a55d39109492c3ae5401366da7cd7237fc649" exitCode=0 Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.918591 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r48bt" event={"ID":"cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0","Type":"ContainerDied","Data":"e2e37554a3c5799746ecbb9b307a55d39109492c3ae5401366da7cd7237fc649"} Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.927202 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4bb730d3-952a-46cd-b8cf-be352483f49b-config\") pod \"controller-manager-6698df76c9-sstcr\" (UID: \"4bb730d3-952a-46cd-b8cf-be352483f49b\") " pod="openshift-controller-manager/controller-manager-6698df76c9-sstcr" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.927284 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5pvsv\" (UniqueName: \"kubernetes.io/projected/4bb730d3-952a-46cd-b8cf-be352483f49b-kube-api-access-5pvsv\") pod \"controller-manager-6698df76c9-sstcr\" (UID: \"4bb730d3-952a-46cd-b8cf-be352483f49b\") " pod="openshift-controller-manager/controller-manager-6698df76c9-sstcr" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.927314 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4bb730d3-952a-46cd-b8cf-be352483f49b-client-ca\") pod \"controller-manager-6698df76c9-sstcr\" (UID: \"4bb730d3-952a-46cd-b8cf-be352483f49b\") " pod="openshift-controller-manager/controller-manager-6698df76c9-sstcr" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.927353 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4bb730d3-952a-46cd-b8cf-be352483f49b-proxy-ca-bundles\") pod \"controller-manager-6698df76c9-sstcr\" (UID: \"4bb730d3-952a-46cd-b8cf-be352483f49b\") " pod="openshift-controller-manager/controller-manager-6698df76c9-sstcr" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.932755 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4bb730d3-952a-46cd-b8cf-be352483f49b-serving-cert\") pod \"controller-manager-6698df76c9-sstcr\" (UID: \"4bb730d3-952a-46cd-b8cf-be352483f49b\") " pod="openshift-controller-manager/controller-manager-6698df76c9-sstcr" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.937136 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4bb730d3-952a-46cd-b8cf-be352483f49b-client-ca\") pod \"controller-manager-6698df76c9-sstcr\" (UID: \"4bb730d3-952a-46cd-b8cf-be352483f49b\") " pod="openshift-controller-manager/controller-manager-6698df76c9-sstcr" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.938886 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4bb730d3-952a-46cd-b8cf-be352483f49b-proxy-ca-bundles\") pod \"controller-manager-6698df76c9-sstcr\" (UID: \"4bb730d3-952a-46cd-b8cf-be352483f49b\") " 
pod="openshift-controller-manager/controller-manager-6698df76c9-sstcr" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.943092 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4bb730d3-952a-46cd-b8cf-be352483f49b-serving-cert\") pod \"controller-manager-6698df76c9-sstcr\" (UID: \"4bb730d3-952a-46cd-b8cf-be352483f49b\") " pod="openshift-controller-manager/controller-manager-6698df76c9-sstcr" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.948098 4751 generic.go:334] "Generic (PLEG): container finished" podID="1c35558f-cd8a-4a04-baca-ea445d76b712" containerID="1f171047e55a972a58277c75a89d5a5c01ed580dd49df219e0ddf72536ee15e2" exitCode=0 Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.948162 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksw46" event={"ID":"1c35558f-cd8a-4a04-baca-ea445d76b712","Type":"ContainerDied","Data":"1f171047e55a972a58277c75a89d5a5c01ed580dd49df219e0ddf72536ee15e2"} Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.948227 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksw46" event={"ID":"1c35558f-cd8a-4a04-baca-ea445d76b712","Type":"ContainerDied","Data":"b4517f5fa75ca995852be11bc16546544d1b7baf029f9682733d6b2cdaa21274"} Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.948241 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b4517f5fa75ca995852be11bc16546544d1b7baf029f9682733d6b2cdaa21274" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.955711 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5pvsv\" (UniqueName: \"kubernetes.io/projected/4bb730d3-952a-46cd-b8cf-be352483f49b-kube-api-access-5pvsv\") pod \"controller-manager-6698df76c9-sstcr\" (UID: \"4bb730d3-952a-46cd-b8cf-be352483f49b\") " pod="openshift-controller-manager/controller-manager-6698df76c9-sstcr" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.956884 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4bb730d3-952a-46cd-b8cf-be352483f49b-config\") pod \"controller-manager-6698df76c9-sstcr\" (UID: \"4bb730d3-952a-46cd-b8cf-be352483f49b\") " pod="openshift-controller-manager/controller-manager-6698df76c9-sstcr" Feb 27 16:31:40 crc kubenswrapper[4751]: I0227 16:31:40.991186 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6698df76c9-sstcr" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.018612 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5"] Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.028204 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-7dbf64f7d4-mzll5"] Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.031817 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ksw46" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.035064 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-mw4mn" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.037551 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6efd7ce0-9b49-4c51-accf-3efcfc1188e6-marketplace-operator-metrics\") pod \"6efd7ce0-9b49-4c51-accf-3efcfc1188e6\" (UID: \"6efd7ce0-9b49-4c51-accf-3efcfc1188e6\") " Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.037602 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6efd7ce0-9b49-4c51-accf-3efcfc1188e6-marketplace-trusted-ca\") pod \"6efd7ce0-9b49-4c51-accf-3efcfc1188e6\" (UID: \"6efd7ce0-9b49-4c51-accf-3efcfc1188e6\") " Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.037639 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4k6v\" (UniqueName: \"kubernetes.io/projected/1c35558f-cd8a-4a04-baca-ea445d76b712-kube-api-access-q4k6v\") pod \"1c35558f-cd8a-4a04-baca-ea445d76b712\" (UID: \"1c35558f-cd8a-4a04-baca-ea445d76b712\") " Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.037684 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-69xtc\" (UniqueName: \"kubernetes.io/projected/6efd7ce0-9b49-4c51-accf-3efcfc1188e6-kube-api-access-69xtc\") pod \"6efd7ce0-9b49-4c51-accf-3efcfc1188e6\" (UID: \"6efd7ce0-9b49-4c51-accf-3efcfc1188e6\") " Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.037723 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c35558f-cd8a-4a04-baca-ea445d76b712-catalog-content\") pod \"1c35558f-cd8a-4a04-baca-ea445d76b712\" (UID: \"1c35558f-cd8a-4a04-baca-ea445d76b712\") " Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.039940 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6efd7ce0-9b49-4c51-accf-3efcfc1188e6-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "6efd7ce0-9b49-4c51-accf-3efcfc1188e6" (UID: "6efd7ce0-9b49-4c51-accf-3efcfc1188e6"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.042054 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6efd7ce0-9b49-4c51-accf-3efcfc1188e6-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "6efd7ce0-9b49-4c51-accf-3efcfc1188e6" (UID: "6efd7ce0-9b49-4c51-accf-3efcfc1188e6"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.042696 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c35558f-cd8a-4a04-baca-ea445d76b712-kube-api-access-q4k6v" (OuterVolumeSpecName: "kube-api-access-q4k6v") pod "1c35558f-cd8a-4a04-baca-ea445d76b712" (UID: "1c35558f-cd8a-4a04-baca-ea445d76b712"). InnerVolumeSpecName "kube-api-access-q4k6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.051077 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-94pcv" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.063799 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6efd7ce0-9b49-4c51-accf-3efcfc1188e6-kube-api-access-69xtc" (OuterVolumeSpecName: "kube-api-access-69xtc") pod "6efd7ce0-9b49-4c51-accf-3efcfc1188e6" (UID: "6efd7ce0-9b49-4c51-accf-3efcfc1188e6"). InnerVolumeSpecName "kube-api-access-69xtc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.088442 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r48bt" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.107991 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n8xtl" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.138298 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7412acf1-544d-4fbb-a538-2071988c8ae1-utilities\") pod \"7412acf1-544d-4fbb-a538-2071988c8ae1\" (UID: \"7412acf1-544d-4fbb-a538-2071988c8ae1\") " Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.138339 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0-catalog-content\") pod \"cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0\" (UID: \"cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0\") " Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.138368 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4nql\" (UniqueName: \"kubernetes.io/projected/cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0-kube-api-access-s4nql\") pod \"cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0\" (UID: \"cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0\") " Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.138432 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/317aef2b-3749-4a30-afc6-96f40516eae7-utilities\") pod \"317aef2b-3749-4a30-afc6-96f40516eae7\" (UID: \"317aef2b-3749-4a30-afc6-96f40516eae7\") " Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.138474 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7412acf1-544d-4fbb-a538-2071988c8ae1-catalog-content\") pod \"7412acf1-544d-4fbb-a538-2071988c8ae1\" (UID: \"7412acf1-544d-4fbb-a538-2071988c8ae1\") " Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.138498 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c35558f-cd8a-4a04-baca-ea445d76b712-utilities\") pod \"1c35558f-cd8a-4a04-baca-ea445d76b712\" (UID: \"1c35558f-cd8a-4a04-baca-ea445d76b712\") " Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.138517 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/317aef2b-3749-4a30-afc6-96f40516eae7-catalog-content\") pod \"317aef2b-3749-4a30-afc6-96f40516eae7\" (UID: \"317aef2b-3749-4a30-afc6-96f40516eae7\") " Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.138560 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-5r66n\" (UniqueName: \"kubernetes.io/projected/7412acf1-544d-4fbb-a538-2071988c8ae1-kube-api-access-5r66n\") pod \"7412acf1-544d-4fbb-a538-2071988c8ae1\" (UID: \"7412acf1-544d-4fbb-a538-2071988c8ae1\") " Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.138585 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0-utilities\") pod \"cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0\" (UID: \"cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0\") " Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.138632 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26mdd\" (UniqueName: \"kubernetes.io/projected/317aef2b-3749-4a30-afc6-96f40516eae7-kube-api-access-26mdd\") pod \"317aef2b-3749-4a30-afc6-96f40516eae7\" (UID: \"317aef2b-3749-4a30-afc6-96f40516eae7\") " Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.138796 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-69xtc\" (UniqueName: \"kubernetes.io/projected/6efd7ce0-9b49-4c51-accf-3efcfc1188e6-kube-api-access-69xtc\") on node \"crc\" DevicePath \"\"" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.138814 4751 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6efd7ce0-9b49-4c51-accf-3efcfc1188e6-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.138827 4751 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6efd7ce0-9b49-4c51-accf-3efcfc1188e6-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.138840 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4k6v\" (UniqueName: \"kubernetes.io/projected/1c35558f-cd8a-4a04-baca-ea445d76b712-kube-api-access-q4k6v\") on node \"crc\" DevicePath \"\"" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.144211 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c35558f-cd8a-4a04-baca-ea445d76b712-utilities" (OuterVolumeSpecName: "utilities") pod "1c35558f-cd8a-4a04-baca-ea445d76b712" (UID: "1c35558f-cd8a-4a04-baca-ea445d76b712"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.145665 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0-utilities" (OuterVolumeSpecName: "utilities") pod "cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" (UID: "cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.146797 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/317aef2b-3749-4a30-afc6-96f40516eae7-utilities" (OuterVolumeSpecName: "utilities") pod "317aef2b-3749-4a30-afc6-96f40516eae7" (UID: "317aef2b-3749-4a30-afc6-96f40516eae7"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.155496 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0-kube-api-access-s4nql" (OuterVolumeSpecName: "kube-api-access-s4nql") pod "cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" (UID: "cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0"). InnerVolumeSpecName "kube-api-access-s4nql". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.156326 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7412acf1-544d-4fbb-a538-2071988c8ae1-kube-api-access-5r66n" (OuterVolumeSpecName: "kube-api-access-5r66n") pod "7412acf1-544d-4fbb-a538-2071988c8ae1" (UID: "7412acf1-544d-4fbb-a538-2071988c8ae1"). InnerVolumeSpecName "kube-api-access-5r66n". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.160319 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7412acf1-544d-4fbb-a538-2071988c8ae1-utilities" (OuterVolumeSpecName: "utilities") pod "7412acf1-544d-4fbb-a538-2071988c8ae1" (UID: "7412acf1-544d-4fbb-a538-2071988c8ae1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.173652 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/317aef2b-3749-4a30-afc6-96f40516eae7-kube-api-access-26mdd" (OuterVolumeSpecName: "kube-api-access-26mdd") pod "317aef2b-3749-4a30-afc6-96f40516eae7" (UID: "317aef2b-3749-4a30-afc6-96f40516eae7"). InnerVolumeSpecName "kube-api-access-26mdd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.178101 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c35558f-cd8a-4a04-baca-ea445d76b712-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1c35558f-cd8a-4a04-baca-ea445d76b712" (UID: "1c35558f-cd8a-4a04-baca-ea445d76b712"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.192997 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/317aef2b-3749-4a30-afc6-96f40516eae7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "317aef2b-3749-4a30-afc6-96f40516eae7" (UID: "317aef2b-3749-4a30-afc6-96f40516eae7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.240130 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c35558f-cd8a-4a04-baca-ea445d76b712-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.240173 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/317aef2b-3749-4a30-afc6-96f40516eae7-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.240184 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c35558f-cd8a-4a04-baca-ea445d76b712-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.240194 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/317aef2b-3749-4a30-afc6-96f40516eae7-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.240206 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5r66n\" (UniqueName: \"kubernetes.io/projected/7412acf1-544d-4fbb-a538-2071988c8ae1-kube-api-access-5r66n\") on node \"crc\" DevicePath \"\"" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.240218 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.240228 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26mdd\" (UniqueName: \"kubernetes.io/projected/317aef2b-3749-4a30-afc6-96f40516eae7-kube-api-access-26mdd\") on node \"crc\" DevicePath \"\"" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.240239 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7412acf1-544d-4fbb-a538-2071988c8ae1-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.240249 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4nql\" (UniqueName: \"kubernetes.io/projected/cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0-kube-api-access-s4nql\") on node \"crc\" DevicePath \"\"" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.242775 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7412acf1-544d-4fbb-a538-2071988c8ae1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7412acf1-544d-4fbb-a538-2071988c8ae1" (UID: "7412acf1-544d-4fbb-a538-2071988c8ae1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.297562 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" (UID: "cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.320732 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6698df76c9-sstcr"] Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.341452 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.341502 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7412acf1-544d-4fbb-a538-2071988c8ae1-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.495914 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kbkfj"] Feb 27 16:31:41 crc kubenswrapper[4751]: W0227 16:31:41.508817 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc88a4f09_3810_4b81_9a96_7158892ac367.slice/crio-d4a06073c8c0ead953547d49a2efc270dce94facec3dfcb672342c0b7312b704 WatchSource:0}: Error finding container d4a06073c8c0ead953547d49a2efc270dce94facec3dfcb672342c0b7312b704: Status 404 returned error can't find the container with id d4a06073c8c0ead953547d49a2efc270dce94facec3dfcb672342c0b7312b704 Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.724699 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-529dz"] Feb 27 16:31:41 crc kubenswrapper[4751]: E0227 16:31:41.725011 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c35558f-cd8a-4a04-baca-ea445d76b712" containerName="extract-content" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.725031 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c35558f-cd8a-4a04-baca-ea445d76b712" containerName="extract-content" Feb 27 16:31:41 crc kubenswrapper[4751]: E0227 16:31:41.725045 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c35558f-cd8a-4a04-baca-ea445d76b712" containerName="registry-server" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.725054 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c35558f-cd8a-4a04-baca-ea445d76b712" containerName="registry-server" Feb 27 16:31:41 crc kubenswrapper[4751]: E0227 16:31:41.725064 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="317aef2b-3749-4a30-afc6-96f40516eae7" containerName="extract-content" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.725072 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="317aef2b-3749-4a30-afc6-96f40516eae7" containerName="extract-content" Feb 27 16:31:41 crc kubenswrapper[4751]: E0227 16:31:41.725086 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" containerName="registry-server" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.725094 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" containerName="registry-server" Feb 27 16:31:41 crc kubenswrapper[4751]: E0227 16:31:41.725112 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" containerName="extract-utilities" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.725121 4751 
state_mem.go:107] "Deleted CPUSet assignment" podUID="cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" containerName="extract-utilities" Feb 27 16:31:41 crc kubenswrapper[4751]: E0227 16:31:41.725131 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="317aef2b-3749-4a30-afc6-96f40516eae7" containerName="registry-server" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.725142 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="317aef2b-3749-4a30-afc6-96f40516eae7" containerName="registry-server" Feb 27 16:31:41 crc kubenswrapper[4751]: E0227 16:31:41.725157 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6efd7ce0-9b49-4c51-accf-3efcfc1188e6" containerName="marketplace-operator" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.725166 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="6efd7ce0-9b49-4c51-accf-3efcfc1188e6" containerName="marketplace-operator" Feb 27 16:31:41 crc kubenswrapper[4751]: E0227 16:31:41.725177 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" containerName="extract-content" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.725185 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" containerName="extract-content" Feb 27 16:31:41 crc kubenswrapper[4751]: E0227 16:31:41.725197 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="317aef2b-3749-4a30-afc6-96f40516eae7" containerName="extract-utilities" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.725205 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="317aef2b-3749-4a30-afc6-96f40516eae7" containerName="extract-utilities" Feb 27 16:31:41 crc kubenswrapper[4751]: E0227 16:31:41.725218 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" containerName="extract-content" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.725228 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" containerName="extract-content" Feb 27 16:31:41 crc kubenswrapper[4751]: E0227 16:31:41.725241 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" containerName="extract-utilities" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.725251 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" containerName="extract-utilities" Feb 27 16:31:41 crc kubenswrapper[4751]: E0227 16:31:41.725268 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" containerName="registry-server" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.725278 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" containerName="registry-server" Feb 27 16:31:41 crc kubenswrapper[4751]: E0227 16:31:41.725296 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c35558f-cd8a-4a04-baca-ea445d76b712" containerName="extract-utilities" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.725304 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c35558f-cd8a-4a04-baca-ea445d76b712" containerName="extract-utilities" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.725450 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="317aef2b-3749-4a30-afc6-96f40516eae7" containerName="registry-server" Feb 27 16:31:41 crc 
kubenswrapper[4751]: I0227 16:31:41.725468 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c35558f-cd8a-4a04-baca-ea445d76b712" containerName="registry-server" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.725483 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" containerName="registry-server" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.725493 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" containerName="registry-server" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.725502 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="6efd7ce0-9b49-4c51-accf-3efcfc1188e6" containerName="marketplace-operator" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.726044 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.745754 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-529dz"] Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.776963 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/4d4a6f07-ff62-4c15-9db0-e90df91c181b-registry-certificates\") pod \"image-registry-66df7c8f76-529dz\" (UID: \"4d4a6f07-ff62-4c15-9db0-e90df91c181b\") " pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.777009 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b56hw\" (UniqueName: \"kubernetes.io/projected/4d4a6f07-ff62-4c15-9db0-e90df91c181b-kube-api-access-b56hw\") pod \"image-registry-66df7c8f76-529dz\" (UID: \"4d4a6f07-ff62-4c15-9db0-e90df91c181b\") " pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.777040 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-529dz\" (UID: \"4d4a6f07-ff62-4c15-9db0-e90df91c181b\") " pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.777188 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4d4a6f07-ff62-4c15-9db0-e90df91c181b-bound-sa-token\") pod \"image-registry-66df7c8f76-529dz\" (UID: \"4d4a6f07-ff62-4c15-9db0-e90df91c181b\") " pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.777310 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/4d4a6f07-ff62-4c15-9db0-e90df91c181b-registry-tls\") pod \"image-registry-66df7c8f76-529dz\" (UID: \"4d4a6f07-ff62-4c15-9db0-e90df91c181b\") " pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.777371 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" 
(UniqueName: \"kubernetes.io/configmap/4d4a6f07-ff62-4c15-9db0-e90df91c181b-trusted-ca\") pod \"image-registry-66df7c8f76-529dz\" (UID: \"4d4a6f07-ff62-4c15-9db0-e90df91c181b\") " pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.777498 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/4d4a6f07-ff62-4c15-9db0-e90df91c181b-installation-pull-secrets\") pod \"image-registry-66df7c8f76-529dz\" (UID: \"4d4a6f07-ff62-4c15-9db0-e90df91c181b\") " pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.777530 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/4d4a6f07-ff62-4c15-9db0-e90df91c181b-ca-trust-extracted\") pod \"image-registry-66df7c8f76-529dz\" (UID: \"4d4a6f07-ff62-4c15-9db0-e90df91c181b\") " pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.806548 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-529dz\" (UID: \"4d4a6f07-ff62-4c15-9db0-e90df91c181b\") " pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.879182 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/4d4a6f07-ff62-4c15-9db0-e90df91c181b-registry-tls\") pod \"image-registry-66df7c8f76-529dz\" (UID: \"4d4a6f07-ff62-4c15-9db0-e90df91c181b\") " pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.879250 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4d4a6f07-ff62-4c15-9db0-e90df91c181b-trusted-ca\") pod \"image-registry-66df7c8f76-529dz\" (UID: \"4d4a6f07-ff62-4c15-9db0-e90df91c181b\") " pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.879297 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/4d4a6f07-ff62-4c15-9db0-e90df91c181b-installation-pull-secrets\") pod \"image-registry-66df7c8f76-529dz\" (UID: \"4d4a6f07-ff62-4c15-9db0-e90df91c181b\") " pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.879322 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/4d4a6f07-ff62-4c15-9db0-e90df91c181b-ca-trust-extracted\") pod \"image-registry-66df7c8f76-529dz\" (UID: \"4d4a6f07-ff62-4c15-9db0-e90df91c181b\") " pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.879359 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/4d4a6f07-ff62-4c15-9db0-e90df91c181b-registry-certificates\") pod \"image-registry-66df7c8f76-529dz\" (UID: 
\"4d4a6f07-ff62-4c15-9db0-e90df91c181b\") " pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.879384 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b56hw\" (UniqueName: \"kubernetes.io/projected/4d4a6f07-ff62-4c15-9db0-e90df91c181b-kube-api-access-b56hw\") pod \"image-registry-66df7c8f76-529dz\" (UID: \"4d4a6f07-ff62-4c15-9db0-e90df91c181b\") " pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.879443 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4d4a6f07-ff62-4c15-9db0-e90df91c181b-bound-sa-token\") pod \"image-registry-66df7c8f76-529dz\" (UID: \"4d4a6f07-ff62-4c15-9db0-e90df91c181b\") " pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.880092 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/4d4a6f07-ff62-4c15-9db0-e90df91c181b-ca-trust-extracted\") pod \"image-registry-66df7c8f76-529dz\" (UID: \"4d4a6f07-ff62-4c15-9db0-e90df91c181b\") " pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.880777 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/4d4a6f07-ff62-4c15-9db0-e90df91c181b-registry-certificates\") pod \"image-registry-66df7c8f76-529dz\" (UID: \"4d4a6f07-ff62-4c15-9db0-e90df91c181b\") " pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.880852 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4d4a6f07-ff62-4c15-9db0-e90df91c181b-trusted-ca\") pod \"image-registry-66df7c8f76-529dz\" (UID: \"4d4a6f07-ff62-4c15-9db0-e90df91c181b\") " pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.884165 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/4d4a6f07-ff62-4c15-9db0-e90df91c181b-registry-tls\") pod \"image-registry-66df7c8f76-529dz\" (UID: \"4d4a6f07-ff62-4c15-9db0-e90df91c181b\") " pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.885863 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/4d4a6f07-ff62-4c15-9db0-e90df91c181b-installation-pull-secrets\") pod \"image-registry-66df7c8f76-529dz\" (UID: \"4d4a6f07-ff62-4c15-9db0-e90df91c181b\") " pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.901989 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4d4a6f07-ff62-4c15-9db0-e90df91c181b-bound-sa-token\") pod \"image-registry-66df7c8f76-529dz\" (UID: \"4d4a6f07-ff62-4c15-9db0-e90df91c181b\") " pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.902083 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b56hw\" 
(UniqueName: \"kubernetes.io/projected/4d4a6f07-ff62-4c15-9db0-e90df91c181b-kube-api-access-b56hw\") pod \"image-registry-66df7c8f76-529dz\" (UID: \"4d4a6f07-ff62-4c15-9db0-e90df91c181b\") " pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.963602 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kbkfj" event={"ID":"c88a4f09-3810-4b81-9a96-7158892ac367","Type":"ContainerStarted","Data":"d4a06073c8c0ead953547d49a2efc270dce94facec3dfcb672342c0b7312b704"} Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.965093 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-mw4mn" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.965139 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-mw4mn" event={"ID":"6efd7ce0-9b49-4c51-accf-3efcfc1188e6","Type":"ContainerDied","Data":"64ca60642130ddd2d989b28672936ee38d040ddfdf2e67c1d1652207a1914332"} Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.965198 4751 scope.go:117] "RemoveContainer" containerID="ac44ab100ee04292ad4c40faef41bf61ca615c7ced92e5e7c2506100e5c38686" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.966838 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6698df76c9-sstcr" event={"ID":"4bb730d3-952a-46cd-b8cf-be352483f49b","Type":"ContainerStarted","Data":"9d969c97820bdca204bec82d9012477a85ab586a0d7bbb8f267184e4b8bfacf9"} Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.969130 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6698df76c9-sstcr" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.970174 4751 patch_prober.go:28] interesting pod/controller-manager-6698df76c9-sstcr container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.73:8443/healthz\": dial tcp 10.217.0.73:8443: connect: connection refused" start-of-body= Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.970209 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-6698df76c9-sstcr" podUID="4bb730d3-952a-46cd-b8cf-be352483f49b" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.73:8443/healthz\": dial tcp 10.217.0.73:8443: connect: connection refused" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.970625 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-94pcv" event={"ID":"7412acf1-544d-4fbb-a538-2071988c8ae1","Type":"ContainerDied","Data":"298a8a1a0704ff36737e976a60cc129a06cc2f2b28a956480535e5c4e31b89bb"} Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.970645 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-94pcv" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.974152 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r48bt" event={"ID":"cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0","Type":"ContainerDied","Data":"8bbef1ff2e3802c7153d56db6a1dc1340de7ff13a4208b50afa82225dd71f467"} Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.974256 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r48bt" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.978069 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ksw46" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.978094 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n8xtl" event={"ID":"317aef2b-3749-4a30-afc6-96f40516eae7","Type":"ContainerDied","Data":"efd45ad4a9384a2151cc2b916ccc1c9736c61b8d2928b3ad4c7eac7c7a12916a"} Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.978164 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n8xtl" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.995372 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6698df76c9-sstcr" podStartSLOduration=2.995352637 podStartE2EDuration="2.995352637s" podCreationTimestamp="2026-02-27 16:31:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:31:41.992352328 +0000 UTC m=+464.139366775" watchObservedRunningTime="2026-02-27 16:31:41.995352637 +0000 UTC m=+464.142367084" Feb 27 16:31:41 crc kubenswrapper[4751]: I0227 16:31:41.996854 4751 scope.go:117] "RemoveContainer" containerID="70ef3570bab755daece0f426d5090821cc7ffcfd0b7dc19050930c97ec31c8fc" Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.016319 4751 scope.go:117] "RemoveContainer" containerID="0ceac9ff4debb69a58c25fa3a347c8ed1cefe8d647e8b0a88066b001de4d57d1" Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.041040 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mw4mn"] Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.048671 4751 scope.go:117] "RemoveContainer" containerID="37d6324a08c50ef22c14f699bf90eb965bf368048332b99e50383ab8c0ad69b0" Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.054768 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mw4mn"] Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.061491 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-94pcv"] Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.071308 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-94pcv"] Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.073192 4751 scope.go:117] "RemoveContainer" containerID="e2e37554a3c5799746ecbb9b307a55d39109492c3ae5401366da7cd7237fc649" Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.080468 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-n8xtl"] Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 
16:31:42.090634 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-n8xtl"] Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.109316 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ksw46"] Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.109849 4751 scope.go:117] "RemoveContainer" containerID="a356988ecfdd3ffab597de21eb5d1e582904600d03089de82cbd78e43f0851a0" Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.115754 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ksw46"] Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.119049 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r48bt"] Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.124631 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.133481 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-r48bt"] Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.147506 4751 scope.go:117] "RemoveContainer" containerID="54db31796b55c0b7c86de429a4044167ed26177ee9d2e09d3cbaa0636b09a26e" Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.180646 4751 scope.go:117] "RemoveContainer" containerID="491e99996e1d9d684128abd09f5e7ce6477de0245c550dd66ee1b301c746f5e9" Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.208071 4751 scope.go:117] "RemoveContainer" containerID="15ddfd4810d51df8bfa730d66d27f9aca3da0d582099579a99938920675f90c8" Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.236427 4751 scope.go:117] "RemoveContainer" containerID="63a8255fdb0eb404763761e4e4ae89220717f807a19571c937fda7eb2e74680e" Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.482574 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-529dz"] Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.528933 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c35558f-cd8a-4a04-baca-ea445d76b712" path="/var/lib/kubelet/pods/1c35558f-cd8a-4a04-baca-ea445d76b712/volumes" Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.529952 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="317aef2b-3749-4a30-afc6-96f40516eae7" path="/var/lib/kubelet/pods/317aef2b-3749-4a30-afc6-96f40516eae7/volumes" Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.530791 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6efd7ce0-9b49-4c51-accf-3efcfc1188e6" path="/var/lib/kubelet/pods/6efd7ce0-9b49-4c51-accf-3efcfc1188e6/volumes" Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.531776 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7412acf1-544d-4fbb-a538-2071988c8ae1" path="/var/lib/kubelet/pods/7412acf1-544d-4fbb-a538-2071988c8ae1/volumes" Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.532491 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a44db268-8fca-4c87-ac3b-137c6d81dae2" path="/var/lib/kubelet/pods/a44db268-8fca-4c87-ac3b-137c6d81dae2/volumes" Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.533033 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0" 
path="/var/lib/kubelet/pods/cd22dcd4-2184-46b5-9c2f-ed1a65c64fd0/volumes" Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.988137 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kbkfj" event={"ID":"c88a4f09-3810-4b81-9a96-7158892ac367","Type":"ContainerStarted","Data":"af632a17640badef4fb855b12f71e4075610cc5e802d6b34d3d208d2f13c8d1e"} Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.989125 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-kbkfj" Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.994133 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6698df76c9-sstcr" event={"ID":"4bb730d3-952a-46cd-b8cf-be352483f49b","Type":"ContainerStarted","Data":"89702cf2c133bd3dbbb1ba13068abe1a4b5dc8c166b37e2bc4e38d8a34780ac7"} Feb 27 16:31:42 crc kubenswrapper[4751]: I0227 16:31:42.995902 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-kbkfj" Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.000600 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6698df76c9-sstcr" Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.001353 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-529dz" event={"ID":"4d4a6f07-ff62-4c15-9db0-e90df91c181b","Type":"ContainerStarted","Data":"7da34acde16910382111ef746f68b2d90eb65457376ad7e09537705fa295eec0"} Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.001416 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-529dz" event={"ID":"4d4a6f07-ff62-4c15-9db0-e90df91c181b","Type":"ContainerStarted","Data":"4973c42440b125d8810b66091f9c354bd7ec75c82a1753ded996b833564bd152"} Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.001559 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.013167 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-kbkfj" podStartSLOduration=3.013138823 podStartE2EDuration="3.013138823s" podCreationTimestamp="2026-02-27 16:31:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:31:43.006630252 +0000 UTC m=+465.153644709" watchObservedRunningTime="2026-02-27 16:31:43.013138823 +0000 UTC m=+465.160153270" Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.074464 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-529dz" podStartSLOduration=2.074437001 podStartE2EDuration="2.074437001s" podCreationTimestamp="2026-02-27 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:31:43.073750413 +0000 UTC m=+465.220764870" watchObservedRunningTime="2026-02-27 16:31:43.074437001 +0000 UTC m=+465.221451458" Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.294080 4751 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/redhat-operators-j626z"] Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.295765 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j626z" Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.298501 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.316216 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5195d38b-7810-4277-b020-5a89a9189dc0-utilities\") pod \"redhat-operators-j626z\" (UID: \"5195d38b-7810-4277-b020-5a89a9189dc0\") " pod="openshift-marketplace/redhat-operators-j626z" Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.316579 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5195d38b-7810-4277-b020-5a89a9189dc0-catalog-content\") pod \"redhat-operators-j626z\" (UID: \"5195d38b-7810-4277-b020-5a89a9189dc0\") " pod="openshift-marketplace/redhat-operators-j626z" Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.316610 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmh5l\" (UniqueName: \"kubernetes.io/projected/5195d38b-7810-4277-b020-5a89a9189dc0-kube-api-access-tmh5l\") pod \"redhat-operators-j626z\" (UID: \"5195d38b-7810-4277-b020-5a89a9189dc0\") " pod="openshift-marketplace/redhat-operators-j626z" Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.316526 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-j626z"] Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.417870 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmh5l\" (UniqueName: \"kubernetes.io/projected/5195d38b-7810-4277-b020-5a89a9189dc0-kube-api-access-tmh5l\") pod \"redhat-operators-j626z\" (UID: \"5195d38b-7810-4277-b020-5a89a9189dc0\") " pod="openshift-marketplace/redhat-operators-j626z" Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.417988 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5195d38b-7810-4277-b020-5a89a9189dc0-utilities\") pod \"redhat-operators-j626z\" (UID: \"5195d38b-7810-4277-b020-5a89a9189dc0\") " pod="openshift-marketplace/redhat-operators-j626z" Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.418013 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5195d38b-7810-4277-b020-5a89a9189dc0-catalog-content\") pod \"redhat-operators-j626z\" (UID: \"5195d38b-7810-4277-b020-5a89a9189dc0\") " pod="openshift-marketplace/redhat-operators-j626z" Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.418486 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5195d38b-7810-4277-b020-5a89a9189dc0-catalog-content\") pod \"redhat-operators-j626z\" (UID: \"5195d38b-7810-4277-b020-5a89a9189dc0\") " pod="openshift-marketplace/redhat-operators-j626z" Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.418643 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/5195d38b-7810-4277-b020-5a89a9189dc0-utilities\") pod \"redhat-operators-j626z\" (UID: \"5195d38b-7810-4277-b020-5a89a9189dc0\") " pod="openshift-marketplace/redhat-operators-j626z" Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.438588 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmh5l\" (UniqueName: \"kubernetes.io/projected/5195d38b-7810-4277-b020-5a89a9189dc0-kube-api-access-tmh5l\") pod \"redhat-operators-j626z\" (UID: \"5195d38b-7810-4277-b020-5a89a9189dc0\") " pod="openshift-marketplace/redhat-operators-j626z" Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.620017 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j626z" Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.900166 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-f7hzl"] Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.901850 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f7hzl" Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.903318 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-f7hzl"] Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.904731 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.938988 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgl76\" (UniqueName: \"kubernetes.io/projected/203be014-29c5-44e2-bc8d-1a71c1448f57-kube-api-access-cgl76\") pod \"certified-operators-f7hzl\" (UID: \"203be014-29c5-44e2-bc8d-1a71c1448f57\") " pod="openshift-marketplace/certified-operators-f7hzl" Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.939069 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/203be014-29c5-44e2-bc8d-1a71c1448f57-catalog-content\") pod \"certified-operators-f7hzl\" (UID: \"203be014-29c5-44e2-bc8d-1a71c1448f57\") " pod="openshift-marketplace/certified-operators-f7hzl" Feb 27 16:31:43 crc kubenswrapper[4751]: I0227 16:31:43.939100 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/203be014-29c5-44e2-bc8d-1a71c1448f57-utilities\") pod \"certified-operators-f7hzl\" (UID: \"203be014-29c5-44e2-bc8d-1a71c1448f57\") " pod="openshift-marketplace/certified-operators-f7hzl" Feb 27 16:31:44 crc kubenswrapper[4751]: I0227 16:31:44.045152 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgl76\" (UniqueName: \"kubernetes.io/projected/203be014-29c5-44e2-bc8d-1a71c1448f57-kube-api-access-cgl76\") pod \"certified-operators-f7hzl\" (UID: \"203be014-29c5-44e2-bc8d-1a71c1448f57\") " pod="openshift-marketplace/certified-operators-f7hzl" Feb 27 16:31:44 crc kubenswrapper[4751]: I0227 16:31:44.045305 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/203be014-29c5-44e2-bc8d-1a71c1448f57-catalog-content\") pod \"certified-operators-f7hzl\" (UID: \"203be014-29c5-44e2-bc8d-1a71c1448f57\") " 
pod="openshift-marketplace/certified-operators-f7hzl" Feb 27 16:31:44 crc kubenswrapper[4751]: I0227 16:31:44.045358 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/203be014-29c5-44e2-bc8d-1a71c1448f57-utilities\") pod \"certified-operators-f7hzl\" (UID: \"203be014-29c5-44e2-bc8d-1a71c1448f57\") " pod="openshift-marketplace/certified-operators-f7hzl" Feb 27 16:31:44 crc kubenswrapper[4751]: I0227 16:31:44.046001 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/203be014-29c5-44e2-bc8d-1a71c1448f57-catalog-content\") pod \"certified-operators-f7hzl\" (UID: \"203be014-29c5-44e2-bc8d-1a71c1448f57\") " pod="openshift-marketplace/certified-operators-f7hzl" Feb 27 16:31:44 crc kubenswrapper[4751]: I0227 16:31:44.046131 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/203be014-29c5-44e2-bc8d-1a71c1448f57-utilities\") pod \"certified-operators-f7hzl\" (UID: \"203be014-29c5-44e2-bc8d-1a71c1448f57\") " pod="openshift-marketplace/certified-operators-f7hzl" Feb 27 16:31:44 crc kubenswrapper[4751]: I0227 16:31:44.068426 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgl76\" (UniqueName: \"kubernetes.io/projected/203be014-29c5-44e2-bc8d-1a71c1448f57-kube-api-access-cgl76\") pod \"certified-operators-f7hzl\" (UID: \"203be014-29c5-44e2-bc8d-1a71c1448f57\") " pod="openshift-marketplace/certified-operators-f7hzl" Feb 27 16:31:44 crc kubenswrapper[4751]: I0227 16:31:44.091284 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-j626z"] Feb 27 16:31:44 crc kubenswrapper[4751]: W0227 16:31:44.116177 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5195d38b_7810_4277_b020_5a89a9189dc0.slice/crio-cd8ccef0a999eefaf2897ef00d3b7f73ee08be8a7b36ed8227a685fee9e98b21 WatchSource:0}: Error finding container cd8ccef0a999eefaf2897ef00d3b7f73ee08be8a7b36ed8227a685fee9e98b21: Status 404 returned error can't find the container with id cd8ccef0a999eefaf2897ef00d3b7f73ee08be8a7b36ed8227a685fee9e98b21 Feb 27 16:31:44 crc kubenswrapper[4751]: I0227 16:31:44.224562 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-f7hzl" Feb 27 16:31:45 crc kubenswrapper[4751]: I0227 16:31:45.016213 4751 generic.go:334] "Generic (PLEG): container finished" podID="5195d38b-7810-4277-b020-5a89a9189dc0" containerID="1401a0e5262fe4b908629d1c69fa34eedfb5bee417da65ade10a674424f08da0" exitCode=0 Feb 27 16:31:45 crc kubenswrapper[4751]: I0227 16:31:45.016420 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j626z" event={"ID":"5195d38b-7810-4277-b020-5a89a9189dc0","Type":"ContainerDied","Data":"1401a0e5262fe4b908629d1c69fa34eedfb5bee417da65ade10a674424f08da0"} Feb 27 16:31:45 crc kubenswrapper[4751]: I0227 16:31:45.016646 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j626z" event={"ID":"5195d38b-7810-4277-b020-5a89a9189dc0","Type":"ContainerStarted","Data":"cd8ccef0a999eefaf2897ef00d3b7f73ee08be8a7b36ed8227a685fee9e98b21"} Feb 27 16:31:45 crc kubenswrapper[4751]: I0227 16:31:45.443847 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-f7hzl"] Feb 27 16:31:45 crc kubenswrapper[4751]: W0227 16:31:45.454362 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod203be014_29c5_44e2_bc8d_1a71c1448f57.slice/crio-7c2721e5b344978aa00387bbea09520d923f7264d7ccf701f6afe65dfb03e12c WatchSource:0}: Error finding container 7c2721e5b344978aa00387bbea09520d923f7264d7ccf701f6afe65dfb03e12c: Status 404 returned error can't find the container with id 7c2721e5b344978aa00387bbea09520d923f7264d7ccf701f6afe65dfb03e12c Feb 27 16:31:45 crc kubenswrapper[4751]: I0227 16:31:45.703995 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pdccw"] Feb 27 16:31:45 crc kubenswrapper[4751]: I0227 16:31:45.705790 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pdccw" Feb 27 16:31:45 crc kubenswrapper[4751]: I0227 16:31:45.707130 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pdccw"] Feb 27 16:31:45 crc kubenswrapper[4751]: I0227 16:31:45.708034 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Feb 27 16:31:45 crc kubenswrapper[4751]: I0227 16:31:45.769881 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d9a23e1-4ca5-4648-b249-303e9e41a14c-utilities\") pod \"community-operators-pdccw\" (UID: \"9d9a23e1-4ca5-4648-b249-303e9e41a14c\") " pod="openshift-marketplace/community-operators-pdccw" Feb 27 16:31:45 crc kubenswrapper[4751]: I0227 16:31:45.769974 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5nvw\" (UniqueName: \"kubernetes.io/projected/9d9a23e1-4ca5-4648-b249-303e9e41a14c-kube-api-access-j5nvw\") pod \"community-operators-pdccw\" (UID: \"9d9a23e1-4ca5-4648-b249-303e9e41a14c\") " pod="openshift-marketplace/community-operators-pdccw" Feb 27 16:31:45 crc kubenswrapper[4751]: I0227 16:31:45.770040 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d9a23e1-4ca5-4648-b249-303e9e41a14c-catalog-content\") pod \"community-operators-pdccw\" (UID: \"9d9a23e1-4ca5-4648-b249-303e9e41a14c\") " pod="openshift-marketplace/community-operators-pdccw" Feb 27 16:31:45 crc kubenswrapper[4751]: I0227 16:31:45.870955 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5nvw\" (UniqueName: \"kubernetes.io/projected/9d9a23e1-4ca5-4648-b249-303e9e41a14c-kube-api-access-j5nvw\") pod \"community-operators-pdccw\" (UID: \"9d9a23e1-4ca5-4648-b249-303e9e41a14c\") " pod="openshift-marketplace/community-operators-pdccw" Feb 27 16:31:45 crc kubenswrapper[4751]: I0227 16:31:45.871033 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d9a23e1-4ca5-4648-b249-303e9e41a14c-catalog-content\") pod \"community-operators-pdccw\" (UID: \"9d9a23e1-4ca5-4648-b249-303e9e41a14c\") " pod="openshift-marketplace/community-operators-pdccw" Feb 27 16:31:45 crc kubenswrapper[4751]: I0227 16:31:45.871060 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d9a23e1-4ca5-4648-b249-303e9e41a14c-utilities\") pod \"community-operators-pdccw\" (UID: \"9d9a23e1-4ca5-4648-b249-303e9e41a14c\") " pod="openshift-marketplace/community-operators-pdccw" Feb 27 16:31:45 crc kubenswrapper[4751]: I0227 16:31:45.871459 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d9a23e1-4ca5-4648-b249-303e9e41a14c-utilities\") pod \"community-operators-pdccw\" (UID: \"9d9a23e1-4ca5-4648-b249-303e9e41a14c\") " pod="openshift-marketplace/community-operators-pdccw" Feb 27 16:31:45 crc kubenswrapper[4751]: I0227 16:31:45.871991 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d9a23e1-4ca5-4648-b249-303e9e41a14c-catalog-content\") pod \"community-operators-pdccw\" (UID: 
\"9d9a23e1-4ca5-4648-b249-303e9e41a14c\") " pod="openshift-marketplace/community-operators-pdccw" Feb 27 16:31:45 crc kubenswrapper[4751]: I0227 16:31:45.904894 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5nvw\" (UniqueName: \"kubernetes.io/projected/9d9a23e1-4ca5-4648-b249-303e9e41a14c-kube-api-access-j5nvw\") pod \"community-operators-pdccw\" (UID: \"9d9a23e1-4ca5-4648-b249-303e9e41a14c\") " pod="openshift-marketplace/community-operators-pdccw" Feb 27 16:31:46 crc kubenswrapper[4751]: I0227 16:31:46.027084 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j626z" event={"ID":"5195d38b-7810-4277-b020-5a89a9189dc0","Type":"ContainerStarted","Data":"73d2cf9a6113ac375b5a81d7ebfce09f357bfc175fb1c2c7caeeab7d27c72ab9"} Feb 27 16:31:46 crc kubenswrapper[4751]: I0227 16:31:46.031152 4751 generic.go:334] "Generic (PLEG): container finished" podID="203be014-29c5-44e2-bc8d-1a71c1448f57" containerID="559a84c20d51f1f3f6f8e2b60a2caa886af63917133ec15fbfac9e45df923848" exitCode=0 Feb 27 16:31:46 crc kubenswrapper[4751]: I0227 16:31:46.031207 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f7hzl" event={"ID":"203be014-29c5-44e2-bc8d-1a71c1448f57","Type":"ContainerDied","Data":"559a84c20d51f1f3f6f8e2b60a2caa886af63917133ec15fbfac9e45df923848"} Feb 27 16:31:46 crc kubenswrapper[4751]: I0227 16:31:46.031233 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f7hzl" event={"ID":"203be014-29c5-44e2-bc8d-1a71c1448f57","Type":"ContainerStarted","Data":"7c2721e5b344978aa00387bbea09520d923f7264d7ccf701f6afe65dfb03e12c"} Feb 27 16:31:46 crc kubenswrapper[4751]: I0227 16:31:46.090285 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pdccw" Feb 27 16:31:46 crc kubenswrapper[4751]: I0227 16:31:46.299971 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-9p4vl"] Feb 27 16:31:46 crc kubenswrapper[4751]: I0227 16:31:46.302652 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9p4vl" Feb 27 16:31:46 crc kubenswrapper[4751]: I0227 16:31:46.310582 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Feb 27 16:31:46 crc kubenswrapper[4751]: I0227 16:31:46.311007 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9p4vl"] Feb 27 16:31:46 crc kubenswrapper[4751]: I0227 16:31:46.380550 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cpvj5\" (UniqueName: \"kubernetes.io/projected/031d7edf-76d2-4e3c-9985-0d9759f9c8d6-kube-api-access-cpvj5\") pod \"redhat-marketplace-9p4vl\" (UID: \"031d7edf-76d2-4e3c-9985-0d9759f9c8d6\") " pod="openshift-marketplace/redhat-marketplace-9p4vl" Feb 27 16:31:46 crc kubenswrapper[4751]: I0227 16:31:46.380701 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/031d7edf-76d2-4e3c-9985-0d9759f9c8d6-catalog-content\") pod \"redhat-marketplace-9p4vl\" (UID: \"031d7edf-76d2-4e3c-9985-0d9759f9c8d6\") " pod="openshift-marketplace/redhat-marketplace-9p4vl" Feb 27 16:31:46 crc kubenswrapper[4751]: I0227 16:31:46.380728 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/031d7edf-76d2-4e3c-9985-0d9759f9c8d6-utilities\") pod \"redhat-marketplace-9p4vl\" (UID: \"031d7edf-76d2-4e3c-9985-0d9759f9c8d6\") " pod="openshift-marketplace/redhat-marketplace-9p4vl" Feb 27 16:31:46 crc kubenswrapper[4751]: I0227 16:31:46.482698 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cpvj5\" (UniqueName: \"kubernetes.io/projected/031d7edf-76d2-4e3c-9985-0d9759f9c8d6-kube-api-access-cpvj5\") pod \"redhat-marketplace-9p4vl\" (UID: \"031d7edf-76d2-4e3c-9985-0d9759f9c8d6\") " pod="openshift-marketplace/redhat-marketplace-9p4vl" Feb 27 16:31:46 crc kubenswrapper[4751]: I0227 16:31:46.482948 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/031d7edf-76d2-4e3c-9985-0d9759f9c8d6-utilities\") pod \"redhat-marketplace-9p4vl\" (UID: \"031d7edf-76d2-4e3c-9985-0d9759f9c8d6\") " pod="openshift-marketplace/redhat-marketplace-9p4vl" Feb 27 16:31:46 crc kubenswrapper[4751]: I0227 16:31:46.482998 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/031d7edf-76d2-4e3c-9985-0d9759f9c8d6-catalog-content\") pod \"redhat-marketplace-9p4vl\" (UID: \"031d7edf-76d2-4e3c-9985-0d9759f9c8d6\") " pod="openshift-marketplace/redhat-marketplace-9p4vl" Feb 27 16:31:46 crc kubenswrapper[4751]: I0227 16:31:46.483620 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/031d7edf-76d2-4e3c-9985-0d9759f9c8d6-catalog-content\") pod \"redhat-marketplace-9p4vl\" (UID: \"031d7edf-76d2-4e3c-9985-0d9759f9c8d6\") " pod="openshift-marketplace/redhat-marketplace-9p4vl" Feb 27 16:31:46 crc kubenswrapper[4751]: I0227 16:31:46.484011 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/031d7edf-76d2-4e3c-9985-0d9759f9c8d6-utilities\") pod \"redhat-marketplace-9p4vl\" (UID: 
\"031d7edf-76d2-4e3c-9985-0d9759f9c8d6\") " pod="openshift-marketplace/redhat-marketplace-9p4vl" Feb 27 16:31:46 crc kubenswrapper[4751]: I0227 16:31:46.507910 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cpvj5\" (UniqueName: \"kubernetes.io/projected/031d7edf-76d2-4e3c-9985-0d9759f9c8d6-kube-api-access-cpvj5\") pod \"redhat-marketplace-9p4vl\" (UID: \"031d7edf-76d2-4e3c-9985-0d9759f9c8d6\") " pod="openshift-marketplace/redhat-marketplace-9p4vl" Feb 27 16:31:46 crc kubenswrapper[4751]: I0227 16:31:46.550181 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pdccw"] Feb 27 16:31:46 crc kubenswrapper[4751]: W0227 16:31:46.606187 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d9a23e1_4ca5_4648_b249_303e9e41a14c.slice/crio-a6225eec82e591bfc44c801b1ba13fb499f4c2a144e8b493fe170895c1033def WatchSource:0}: Error finding container a6225eec82e591bfc44c801b1ba13fb499f4c2a144e8b493fe170895c1033def: Status 404 returned error can't find the container with id a6225eec82e591bfc44c801b1ba13fb499f4c2a144e8b493fe170895c1033def Feb 27 16:31:46 crc kubenswrapper[4751]: I0227 16:31:46.621121 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9p4vl" Feb 27 16:31:47 crc kubenswrapper[4751]: I0227 16:31:47.027281 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9p4vl"] Feb 27 16:31:47 crc kubenswrapper[4751]: W0227 16:31:47.034475 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod031d7edf_76d2_4e3c_9985_0d9759f9c8d6.slice/crio-736775e6de7eeb83b10e195af3a881c785f68ce978f64bfccc30d46716d91180 WatchSource:0}: Error finding container 736775e6de7eeb83b10e195af3a881c785f68ce978f64bfccc30d46716d91180: Status 404 returned error can't find the container with id 736775e6de7eeb83b10e195af3a881c785f68ce978f64bfccc30d46716d91180 Feb 27 16:31:47 crc kubenswrapper[4751]: I0227 16:31:47.041689 4751 generic.go:334] "Generic (PLEG): container finished" podID="5195d38b-7810-4277-b020-5a89a9189dc0" containerID="73d2cf9a6113ac375b5a81d7ebfce09f357bfc175fb1c2c7caeeab7d27c72ab9" exitCode=0 Feb 27 16:31:47 crc kubenswrapper[4751]: I0227 16:31:47.041833 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j626z" event={"ID":"5195d38b-7810-4277-b020-5a89a9189dc0","Type":"ContainerDied","Data":"73d2cf9a6113ac375b5a81d7ebfce09f357bfc175fb1c2c7caeeab7d27c72ab9"} Feb 27 16:31:47 crc kubenswrapper[4751]: I0227 16:31:47.046291 4751 generic.go:334] "Generic (PLEG): container finished" podID="9d9a23e1-4ca5-4648-b249-303e9e41a14c" containerID="8d40bd992cae460e761f6b66700262c290abda2b765e4a47fdc0427cdaaac620" exitCode=0 Feb 27 16:31:47 crc kubenswrapper[4751]: I0227 16:31:47.046356 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pdccw" event={"ID":"9d9a23e1-4ca5-4648-b249-303e9e41a14c","Type":"ContainerDied","Data":"8d40bd992cae460e761f6b66700262c290abda2b765e4a47fdc0427cdaaac620"} Feb 27 16:31:47 crc kubenswrapper[4751]: I0227 16:31:47.046394 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pdccw" 
event={"ID":"9d9a23e1-4ca5-4648-b249-303e9e41a14c","Type":"ContainerStarted","Data":"a6225eec82e591bfc44c801b1ba13fb499f4c2a144e8b493fe170895c1033def"} Feb 27 16:31:48 crc kubenswrapper[4751]: I0227 16:31:48.054190 4751 generic.go:334] "Generic (PLEG): container finished" podID="203be014-29c5-44e2-bc8d-1a71c1448f57" containerID="dea35405915979b67414244fc47f43f0db8e554c2474e46bea346dbb139d4fae" exitCode=0 Feb 27 16:31:48 crc kubenswrapper[4751]: I0227 16:31:48.054259 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f7hzl" event={"ID":"203be014-29c5-44e2-bc8d-1a71c1448f57","Type":"ContainerDied","Data":"dea35405915979b67414244fc47f43f0db8e554c2474e46bea346dbb139d4fae"} Feb 27 16:31:48 crc kubenswrapper[4751]: I0227 16:31:48.058206 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pdccw" event={"ID":"9d9a23e1-4ca5-4648-b249-303e9e41a14c","Type":"ContainerStarted","Data":"ac9746256fdb91747c5275f7749cec03e19b12664bb4bd46a82c201c2dc1e503"} Feb 27 16:31:48 crc kubenswrapper[4751]: I0227 16:31:48.064227 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j626z" event={"ID":"5195d38b-7810-4277-b020-5a89a9189dc0","Type":"ContainerStarted","Data":"187ef7d71b26ffff7df60870d450743967ad1ffebccb6684b77ec5d620c8cbe8"} Feb 27 16:31:48 crc kubenswrapper[4751]: I0227 16:31:48.070192 4751 generic.go:334] "Generic (PLEG): container finished" podID="031d7edf-76d2-4e3c-9985-0d9759f9c8d6" containerID="03b363123dcf62895433312620f4f3abd7cabd1d3cfcd45ec427d39aef24fcbe" exitCode=0 Feb 27 16:31:48 crc kubenswrapper[4751]: I0227 16:31:48.070469 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9p4vl" event={"ID":"031d7edf-76d2-4e3c-9985-0d9759f9c8d6","Type":"ContainerDied","Data":"03b363123dcf62895433312620f4f3abd7cabd1d3cfcd45ec427d39aef24fcbe"} Feb 27 16:31:48 crc kubenswrapper[4751]: I0227 16:31:48.070543 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9p4vl" event={"ID":"031d7edf-76d2-4e3c-9985-0d9759f9c8d6","Type":"ContainerStarted","Data":"736775e6de7eeb83b10e195af3a881c785f68ce978f64bfccc30d46716d91180"} Feb 27 16:31:48 crc kubenswrapper[4751]: I0227 16:31:48.153669 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-j626z" podStartSLOduration=2.485459156 podStartE2EDuration="5.153649247s" podCreationTimestamp="2026-02-27 16:31:43 +0000 UTC" firstStartedPulling="2026-02-27 16:31:45.021244332 +0000 UTC m=+467.168258779" lastFinishedPulling="2026-02-27 16:31:47.689434423 +0000 UTC m=+469.836448870" observedRunningTime="2026-02-27 16:31:48.148972904 +0000 UTC m=+470.295987391" watchObservedRunningTime="2026-02-27 16:31:48.153649247 +0000 UTC m=+470.300663694" Feb 27 16:31:49 crc kubenswrapper[4751]: I0227 16:31:49.082220 4751 generic.go:334] "Generic (PLEG): container finished" podID="9d9a23e1-4ca5-4648-b249-303e9e41a14c" containerID="ac9746256fdb91747c5275f7749cec03e19b12664bb4bd46a82c201c2dc1e503" exitCode=0 Feb 27 16:31:49 crc kubenswrapper[4751]: I0227 16:31:49.082349 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pdccw" event={"ID":"9d9a23e1-4ca5-4648-b249-303e9e41a14c","Type":"ContainerDied","Data":"ac9746256fdb91747c5275f7749cec03e19b12664bb4bd46a82c201c2dc1e503"} Feb 27 16:31:49 crc kubenswrapper[4751]: I0227 16:31:49.084877 4751 
generic.go:334] "Generic (PLEG): container finished" podID="031d7edf-76d2-4e3c-9985-0d9759f9c8d6" containerID="6d04aba5e8ccc36e7dbe61822224c446d85158ec7a475df65766912b8676729c" exitCode=0 Feb 27 16:31:49 crc kubenswrapper[4751]: I0227 16:31:49.084969 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9p4vl" event={"ID":"031d7edf-76d2-4e3c-9985-0d9759f9c8d6","Type":"ContainerDied","Data":"6d04aba5e8ccc36e7dbe61822224c446d85158ec7a475df65766912b8676729c"} Feb 27 16:31:49 crc kubenswrapper[4751]: I0227 16:31:49.087820 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f7hzl" event={"ID":"203be014-29c5-44e2-bc8d-1a71c1448f57","Type":"ContainerStarted","Data":"173d8ba8a355a979fab9b924d0aef58c33a5ccd52fb9a82b7e8a0dc20dab07d5"} Feb 27 16:31:49 crc kubenswrapper[4751]: I0227 16:31:49.138932 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-f7hzl" podStartSLOduration=3.680129692 podStartE2EDuration="6.138908755s" podCreationTimestamp="2026-02-27 16:31:43 +0000 UTC" firstStartedPulling="2026-02-27 16:31:46.032903287 +0000 UTC m=+468.179917734" lastFinishedPulling="2026-02-27 16:31:48.49168234 +0000 UTC m=+470.638696797" observedRunningTime="2026-02-27 16:31:49.133200115 +0000 UTC m=+471.280214562" watchObservedRunningTime="2026-02-27 16:31:49.138908755 +0000 UTC m=+471.285923192" Feb 27 16:31:50 crc kubenswrapper[4751]: I0227 16:31:50.098091 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9p4vl" event={"ID":"031d7edf-76d2-4e3c-9985-0d9759f9c8d6","Type":"ContainerStarted","Data":"3a2c73a0f3b0c19b787d178f7b17fc064ed09e28cfad84836894f691669bcc8f"} Feb 27 16:31:50 crc kubenswrapper[4751]: I0227 16:31:50.101872 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pdccw" event={"ID":"9d9a23e1-4ca5-4648-b249-303e9e41a14c","Type":"ContainerStarted","Data":"e3b801f0a0e5b2b7fec5b3601c76a2423ceffc1a52cd525e557885c4e140865b"} Feb 27 16:31:50 crc kubenswrapper[4751]: I0227 16:31:50.151076 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pdccw" podStartSLOduration=2.71697717 podStartE2EDuration="5.151047792s" podCreationTimestamp="2026-02-27 16:31:45 +0000 UTC" firstStartedPulling="2026-02-27 16:31:47.047721064 +0000 UTC m=+469.194735541" lastFinishedPulling="2026-02-27 16:31:49.481791716 +0000 UTC m=+471.628806163" observedRunningTime="2026-02-27 16:31:50.149221073 +0000 UTC m=+472.296235540" watchObservedRunningTime="2026-02-27 16:31:50.151047792 +0000 UTC m=+472.298062239" Feb 27 16:31:50 crc kubenswrapper[4751]: I0227 16:31:50.154554 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-9p4vl" podStartSLOduration=2.636865342 podStartE2EDuration="4.154531574s" podCreationTimestamp="2026-02-27 16:31:46 +0000 UTC" firstStartedPulling="2026-02-27 16:31:48.072369592 +0000 UTC m=+470.219384049" lastFinishedPulling="2026-02-27 16:31:49.590035834 +0000 UTC m=+471.737050281" observedRunningTime="2026-02-27 16:31:50.128048345 +0000 UTC m=+472.275062802" watchObservedRunningTime="2026-02-27 16:31:50.154531574 +0000 UTC m=+472.301546021" Feb 27 16:31:53 crc kubenswrapper[4751]: I0227 16:31:53.620361 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-operators-j626z" Feb 27 16:31:53 crc kubenswrapper[4751]: I0227 16:31:53.620808 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-j626z" Feb 27 16:31:54 crc kubenswrapper[4751]: I0227 16:31:54.225482 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-f7hzl" Feb 27 16:31:54 crc kubenswrapper[4751]: I0227 16:31:54.225805 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-f7hzl" Feb 27 16:31:54 crc kubenswrapper[4751]: I0227 16:31:54.314787 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-f7hzl" Feb 27 16:31:54 crc kubenswrapper[4751]: I0227 16:31:54.683945 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-j626z" podUID="5195d38b-7810-4277-b020-5a89a9189dc0" containerName="registry-server" probeResult="failure" output=< Feb 27 16:31:54 crc kubenswrapper[4751]: timeout: failed to connect service ":50051" within 1s Feb 27 16:31:54 crc kubenswrapper[4751]: > Feb 27 16:31:55 crc kubenswrapper[4751]: I0227 16:31:55.179578 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-f7hzl" Feb 27 16:31:56 crc kubenswrapper[4751]: I0227 16:31:56.090969 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pdccw" Feb 27 16:31:56 crc kubenswrapper[4751]: I0227 16:31:56.091037 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pdccw" Feb 27 16:31:56 crc kubenswrapper[4751]: I0227 16:31:56.158142 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pdccw" Feb 27 16:31:56 crc kubenswrapper[4751]: I0227 16:31:56.621586 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-9p4vl" Feb 27 16:31:56 crc kubenswrapper[4751]: I0227 16:31:56.621655 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-9p4vl" Feb 27 16:31:56 crc kubenswrapper[4751]: I0227 16:31:56.666333 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-9p4vl" Feb 27 16:31:57 crc kubenswrapper[4751]: I0227 16:31:57.182284 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-9p4vl" Feb 27 16:31:57 crc kubenswrapper[4751]: I0227 16:31:57.186611 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pdccw" Feb 27 16:31:58 crc kubenswrapper[4751]: I0227 16:31:58.918680 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 16:31:58 crc kubenswrapper[4751]: I0227 16:31:58.918974 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:32:00 crc kubenswrapper[4751]: I0227 16:32:00.153812 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536832-lhd8z"] Feb 27 16:32:00 crc kubenswrapper[4751]: I0227 16:32:00.162870 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536832-lhd8z" Feb 27 16:32:00 crc kubenswrapper[4751]: I0227 16:32:00.168873 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 16:32:00 crc kubenswrapper[4751]: I0227 16:32:00.168885 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 16:32:00 crc kubenswrapper[4751]: I0227 16:32:00.172194 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536832-lhd8z"] Feb 27 16:32:00 crc kubenswrapper[4751]: I0227 16:32:00.172929 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 16:32:00 crc kubenswrapper[4751]: I0227 16:32:00.301232 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9fx8\" (UniqueName: \"kubernetes.io/projected/a52fb7c4-bd18-4923-a287-bd38d5dfa546-kube-api-access-f9fx8\") pod \"auto-csr-approver-29536832-lhd8z\" (UID: \"a52fb7c4-bd18-4923-a287-bd38d5dfa546\") " pod="openshift-infra/auto-csr-approver-29536832-lhd8z" Feb 27 16:32:00 crc kubenswrapper[4751]: I0227 16:32:00.402942 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9fx8\" (UniqueName: \"kubernetes.io/projected/a52fb7c4-bd18-4923-a287-bd38d5dfa546-kube-api-access-f9fx8\") pod \"auto-csr-approver-29536832-lhd8z\" (UID: \"a52fb7c4-bd18-4923-a287-bd38d5dfa546\") " pod="openshift-infra/auto-csr-approver-29536832-lhd8z" Feb 27 16:32:00 crc kubenswrapper[4751]: I0227 16:32:00.439529 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9fx8\" (UniqueName: \"kubernetes.io/projected/a52fb7c4-bd18-4923-a287-bd38d5dfa546-kube-api-access-f9fx8\") pod \"auto-csr-approver-29536832-lhd8z\" (UID: \"a52fb7c4-bd18-4923-a287-bd38d5dfa546\") " pod="openshift-infra/auto-csr-approver-29536832-lhd8z" Feb 27 16:32:00 crc kubenswrapper[4751]: I0227 16:32:00.492007 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536832-lhd8z" Feb 27 16:32:00 crc kubenswrapper[4751]: I0227 16:32:00.928239 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536832-lhd8z"] Feb 27 16:32:01 crc kubenswrapper[4751]: I0227 16:32:01.176927 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536832-lhd8z" event={"ID":"a52fb7c4-bd18-4923-a287-bd38d5dfa546","Type":"ContainerStarted","Data":"2c07d11c0d049858d27b8d7c3ed6c9f0004ee64ff64a76c3c2163d62a09d5f7f"} Feb 27 16:32:02 crc kubenswrapper[4751]: I0227 16:32:02.132682 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-529dz" Feb 27 16:32:02 crc kubenswrapper[4751]: I0227 16:32:02.212130 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zjr9n"] Feb 27 16:32:03 crc kubenswrapper[4751]: I0227 16:32:03.698354 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-j626z" Feb 27 16:32:03 crc kubenswrapper[4751]: I0227 16:32:03.750899 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-j626z" Feb 27 16:32:04 crc kubenswrapper[4751]: I0227 16:32:04.199319 4751 generic.go:334] "Generic (PLEG): container finished" podID="a52fb7c4-bd18-4923-a287-bd38d5dfa546" containerID="a2be35fb931ad32d4203d5790923f1927698671435a08b8e1b56d308b968e625" exitCode=0 Feb 27 16:32:04 crc kubenswrapper[4751]: I0227 16:32:04.199390 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536832-lhd8z" event={"ID":"a52fb7c4-bd18-4923-a287-bd38d5dfa546","Type":"ContainerDied","Data":"a2be35fb931ad32d4203d5790923f1927698671435a08b8e1b56d308b968e625"} Feb 27 16:32:05 crc kubenswrapper[4751]: I0227 16:32:05.631535 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536832-lhd8z" Feb 27 16:32:05 crc kubenswrapper[4751]: I0227 16:32:05.713690 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f9fx8\" (UniqueName: \"kubernetes.io/projected/a52fb7c4-bd18-4923-a287-bd38d5dfa546-kube-api-access-f9fx8\") pod \"a52fb7c4-bd18-4923-a287-bd38d5dfa546\" (UID: \"a52fb7c4-bd18-4923-a287-bd38d5dfa546\") " Feb 27 16:32:05 crc kubenswrapper[4751]: I0227 16:32:05.719916 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a52fb7c4-bd18-4923-a287-bd38d5dfa546-kube-api-access-f9fx8" (OuterVolumeSpecName: "kube-api-access-f9fx8") pod "a52fb7c4-bd18-4923-a287-bd38d5dfa546" (UID: "a52fb7c4-bd18-4923-a287-bd38d5dfa546"). InnerVolumeSpecName "kube-api-access-f9fx8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:32:05 crc kubenswrapper[4751]: I0227 16:32:05.815925 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f9fx8\" (UniqueName: \"kubernetes.io/projected/a52fb7c4-bd18-4923-a287-bd38d5dfa546-kube-api-access-f9fx8\") on node \"crc\" DevicePath \"\"" Feb 27 16:32:06 crc kubenswrapper[4751]: I0227 16:32:06.218810 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536832-lhd8z" event={"ID":"a52fb7c4-bd18-4923-a287-bd38d5dfa546","Type":"ContainerDied","Data":"2c07d11c0d049858d27b8d7c3ed6c9f0004ee64ff64a76c3c2163d62a09d5f7f"} Feb 27 16:32:06 crc kubenswrapper[4751]: I0227 16:32:06.219210 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2c07d11c0d049858d27b8d7c3ed6c9f0004ee64ff64a76c3c2163d62a09d5f7f" Feb 27 16:32:06 crc kubenswrapper[4751]: I0227 16:32:06.218907 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536832-lhd8z" Feb 27 16:32:06 crc kubenswrapper[4751]: I0227 16:32:06.698975 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536826-mxq7k"] Feb 27 16:32:06 crc kubenswrapper[4751]: I0227 16:32:06.703131 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536826-mxq7k"] Feb 27 16:32:08 crc kubenswrapper[4751]: I0227 16:32:08.528580 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11a526fe-64f1-4da8-a0e8-ed276ec069fb" path="/var/lib/kubelet/pods/11a526fe-64f1-4da8-a0e8-ed276ec069fb/volumes" Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.256264 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" podUID="3ffa275a-62dc-46f6-ae70-34b5758d918e" containerName="registry" containerID="cri-o://e46f4c3d3ae8c64c11cb456046b10b45573382b9edad25fb12c93f0c93336c3d" gracePeriod=30 Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.687103 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.799176 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"3ffa275a-62dc-46f6-ae70-34b5758d918e\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.799302 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3ffa275a-62dc-46f6-ae70-34b5758d918e-registry-certificates\") pod \"3ffa275a-62dc-46f6-ae70-34b5758d918e\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.799397 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3ffa275a-62dc-46f6-ae70-34b5758d918e-trusted-ca\") pod \"3ffa275a-62dc-46f6-ae70-34b5758d918e\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.799490 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3ffa275a-62dc-46f6-ae70-34b5758d918e-registry-tls\") pod \"3ffa275a-62dc-46f6-ae70-34b5758d918e\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.799595 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3ffa275a-62dc-46f6-ae70-34b5758d918e-ca-trust-extracted\") pod \"3ffa275a-62dc-46f6-ae70-34b5758d918e\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.799630 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3ffa275a-62dc-46f6-ae70-34b5758d918e-installation-pull-secrets\") pod \"3ffa275a-62dc-46f6-ae70-34b5758d918e\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.799663 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3ffa275a-62dc-46f6-ae70-34b5758d918e-bound-sa-token\") pod \"3ffa275a-62dc-46f6-ae70-34b5758d918e\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.799700 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4svm\" (UniqueName: \"kubernetes.io/projected/3ffa275a-62dc-46f6-ae70-34b5758d918e-kube-api-access-v4svm\") pod \"3ffa275a-62dc-46f6-ae70-34b5758d918e\" (UID: \"3ffa275a-62dc-46f6-ae70-34b5758d918e\") " Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.801061 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ffa275a-62dc-46f6-ae70-34b5758d918e-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "3ffa275a-62dc-46f6-ae70-34b5758d918e" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.801982 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ffa275a-62dc-46f6-ae70-34b5758d918e-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "3ffa275a-62dc-46f6-ae70-34b5758d918e" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.802289 4751 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3ffa275a-62dc-46f6-ae70-34b5758d918e-registry-certificates\") on node \"crc\" DevicePath \"\"" Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.802350 4751 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3ffa275a-62dc-46f6-ae70-34b5758d918e-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.809660 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ffa275a-62dc-46f6-ae70-34b5758d918e-kube-api-access-v4svm" (OuterVolumeSpecName: "kube-api-access-v4svm") pod "3ffa275a-62dc-46f6-ae70-34b5758d918e" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e"). InnerVolumeSpecName "kube-api-access-v4svm". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.811614 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ffa275a-62dc-46f6-ae70-34b5758d918e-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "3ffa275a-62dc-46f6-ae70-34b5758d918e" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.811745 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ffa275a-62dc-46f6-ae70-34b5758d918e-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "3ffa275a-62dc-46f6-ae70-34b5758d918e" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.813900 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ffa275a-62dc-46f6-ae70-34b5758d918e-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "3ffa275a-62dc-46f6-ae70-34b5758d918e" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.816543 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "3ffa275a-62dc-46f6-ae70-34b5758d918e" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.831112 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ffa275a-62dc-46f6-ae70-34b5758d918e-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "3ffa275a-62dc-46f6-ae70-34b5758d918e" (UID: "3ffa275a-62dc-46f6-ae70-34b5758d918e"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.903206 4751 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3ffa275a-62dc-46f6-ae70-34b5758d918e-registry-tls\") on node \"crc\" DevicePath \"\"" Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.903243 4751 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3ffa275a-62dc-46f6-ae70-34b5758d918e-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.903257 4751 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3ffa275a-62dc-46f6-ae70-34b5758d918e-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.903271 4751 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3ffa275a-62dc-46f6-ae70-34b5758d918e-bound-sa-token\") on node \"crc\" DevicePath \"\"" Feb 27 16:32:27 crc kubenswrapper[4751]: I0227 16:32:27.903283 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4svm\" (UniqueName: \"kubernetes.io/projected/3ffa275a-62dc-46f6-ae70-34b5758d918e-kube-api-access-v4svm\") on node \"crc\" DevicePath \"\"" Feb 27 16:32:28 crc kubenswrapper[4751]: I0227 16:32:28.358852 4751 generic.go:334] "Generic (PLEG): container finished" podID="3ffa275a-62dc-46f6-ae70-34b5758d918e" containerID="e46f4c3d3ae8c64c11cb456046b10b45573382b9edad25fb12c93f0c93336c3d" exitCode=0 Feb 27 16:32:28 crc kubenswrapper[4751]: I0227 16:32:28.358910 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" event={"ID":"3ffa275a-62dc-46f6-ae70-34b5758d918e","Type":"ContainerDied","Data":"e46f4c3d3ae8c64c11cb456046b10b45573382b9edad25fb12c93f0c93336c3d"} Feb 27 16:32:28 crc kubenswrapper[4751]: I0227 16:32:28.358955 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" event={"ID":"3ffa275a-62dc-46f6-ae70-34b5758d918e","Type":"ContainerDied","Data":"5008ec15ce6fc74df003e64e3b87fbe5a34cce1834ba743f6fc5c74837d3b72b"} Feb 27 16:32:28 crc kubenswrapper[4751]: I0227 16:32:28.358977 4751 scope.go:117] "RemoveContainer" containerID="e46f4c3d3ae8c64c11cb456046b10b45573382b9edad25fb12c93f0c93336c3d" Feb 27 16:32:28 crc kubenswrapper[4751]: I0227 16:32:28.358970 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-zjr9n" Feb 27 16:32:28 crc kubenswrapper[4751]: I0227 16:32:28.402761 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zjr9n"] Feb 27 16:32:28 crc kubenswrapper[4751]: I0227 16:32:28.413948 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zjr9n"] Feb 27 16:32:28 crc kubenswrapper[4751]: I0227 16:32:28.426181 4751 scope.go:117] "RemoveContainer" containerID="e46f4c3d3ae8c64c11cb456046b10b45573382b9edad25fb12c93f0c93336c3d" Feb 27 16:32:28 crc kubenswrapper[4751]: E0227 16:32:28.426805 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e46f4c3d3ae8c64c11cb456046b10b45573382b9edad25fb12c93f0c93336c3d\": container with ID starting with e46f4c3d3ae8c64c11cb456046b10b45573382b9edad25fb12c93f0c93336c3d not found: ID does not exist" containerID="e46f4c3d3ae8c64c11cb456046b10b45573382b9edad25fb12c93f0c93336c3d" Feb 27 16:32:28 crc kubenswrapper[4751]: I0227 16:32:28.426865 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e46f4c3d3ae8c64c11cb456046b10b45573382b9edad25fb12c93f0c93336c3d"} err="failed to get container status \"e46f4c3d3ae8c64c11cb456046b10b45573382b9edad25fb12c93f0c93336c3d\": rpc error: code = NotFound desc = could not find container \"e46f4c3d3ae8c64c11cb456046b10b45573382b9edad25fb12c93f0c93336c3d\": container with ID starting with e46f4c3d3ae8c64c11cb456046b10b45573382b9edad25fb12c93f0c93336c3d not found: ID does not exist" Feb 27 16:32:28 crc kubenswrapper[4751]: I0227 16:32:28.530205 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ffa275a-62dc-46f6-ae70-34b5758d918e" path="/var/lib/kubelet/pods/3ffa275a-62dc-46f6-ae70-34b5758d918e/volumes" Feb 27 16:32:28 crc kubenswrapper[4751]: I0227 16:32:28.918571 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 16:32:28 crc kubenswrapper[4751]: I0227 16:32:28.918639 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:32:28 crc kubenswrapper[4751]: I0227 16:32:28.918691 4751 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 16:32:28 crc kubenswrapper[4751]: I0227 16:32:28.919451 4751 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0a576cb428bc8c6c7aa4cc0e1673e4d3aa049a82e2ef3e79581b883ffcfca488"} pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 27 16:32:28 crc kubenswrapper[4751]: I0227 16:32:28.919548 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" 
podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" containerID="cri-o://0a576cb428bc8c6c7aa4cc0e1673e4d3aa049a82e2ef3e79581b883ffcfca488" gracePeriod=600 Feb 27 16:32:29 crc kubenswrapper[4751]: I0227 16:32:29.371251 4751 generic.go:334] "Generic (PLEG): container finished" podID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerID="0a576cb428bc8c6c7aa4cc0e1673e4d3aa049a82e2ef3e79581b883ffcfca488" exitCode=0 Feb 27 16:32:29 crc kubenswrapper[4751]: I0227 16:32:29.371357 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerDied","Data":"0a576cb428bc8c6c7aa4cc0e1673e4d3aa049a82e2ef3e79581b883ffcfca488"} Feb 27 16:32:29 crc kubenswrapper[4751]: I0227 16:32:29.371620 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerStarted","Data":"ffc9c7cd2c6fc8ac46b1b7d570ff4efe965dd019a548ff518b61e9ff634572af"} Feb 27 16:32:29 crc kubenswrapper[4751]: I0227 16:32:29.371653 4751 scope.go:117] "RemoveContainer" containerID="4e0b20bfc70d414ef04df5c1ad269566d932ffb59c9d8beda2ee41fdf29a7154" Feb 27 16:34:00 crc kubenswrapper[4751]: I0227 16:34:00.151794 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536834-s786r"] Feb 27 16:34:00 crc kubenswrapper[4751]: E0227 16:34:00.153489 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ffa275a-62dc-46f6-ae70-34b5758d918e" containerName="registry" Feb 27 16:34:00 crc kubenswrapper[4751]: I0227 16:34:00.153525 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ffa275a-62dc-46f6-ae70-34b5758d918e" containerName="registry" Feb 27 16:34:00 crc kubenswrapper[4751]: E0227 16:34:00.153559 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a52fb7c4-bd18-4923-a287-bd38d5dfa546" containerName="oc" Feb 27 16:34:00 crc kubenswrapper[4751]: I0227 16:34:00.153576 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="a52fb7c4-bd18-4923-a287-bd38d5dfa546" containerName="oc" Feb 27 16:34:00 crc kubenswrapper[4751]: I0227 16:34:00.153798 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ffa275a-62dc-46f6-ae70-34b5758d918e" containerName="registry" Feb 27 16:34:00 crc kubenswrapper[4751]: I0227 16:34:00.153834 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="a52fb7c4-bd18-4923-a287-bd38d5dfa546" containerName="oc" Feb 27 16:34:00 crc kubenswrapper[4751]: I0227 16:34:00.154730 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536834-s786r" Feb 27 16:34:00 crc kubenswrapper[4751]: I0227 16:34:00.159077 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 16:34:00 crc kubenswrapper[4751]: I0227 16:34:00.159202 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 16:34:00 crc kubenswrapper[4751]: I0227 16:34:00.161306 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 16:34:00 crc kubenswrapper[4751]: I0227 16:34:00.161846 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536834-s786r"] Feb 27 16:34:00 crc kubenswrapper[4751]: I0227 16:34:00.315844 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9lz7\" (UniqueName: \"kubernetes.io/projected/8c33cd39-6c33-4bd9-80aa-819849282664-kube-api-access-s9lz7\") pod \"auto-csr-approver-29536834-s786r\" (UID: \"8c33cd39-6c33-4bd9-80aa-819849282664\") " pod="openshift-infra/auto-csr-approver-29536834-s786r" Feb 27 16:34:00 crc kubenswrapper[4751]: I0227 16:34:00.418230 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9lz7\" (UniqueName: \"kubernetes.io/projected/8c33cd39-6c33-4bd9-80aa-819849282664-kube-api-access-s9lz7\") pod \"auto-csr-approver-29536834-s786r\" (UID: \"8c33cd39-6c33-4bd9-80aa-819849282664\") " pod="openshift-infra/auto-csr-approver-29536834-s786r" Feb 27 16:34:00 crc kubenswrapper[4751]: I0227 16:34:00.455211 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9lz7\" (UniqueName: \"kubernetes.io/projected/8c33cd39-6c33-4bd9-80aa-819849282664-kube-api-access-s9lz7\") pod \"auto-csr-approver-29536834-s786r\" (UID: \"8c33cd39-6c33-4bd9-80aa-819849282664\") " pod="openshift-infra/auto-csr-approver-29536834-s786r" Feb 27 16:34:00 crc kubenswrapper[4751]: I0227 16:34:00.485365 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536834-s786r" Feb 27 16:34:00 crc kubenswrapper[4751]: I0227 16:34:00.757778 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536834-s786r"] Feb 27 16:34:00 crc kubenswrapper[4751]: I0227 16:34:00.775075 4751 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 27 16:34:01 crc kubenswrapper[4751]: I0227 16:34:01.056256 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536834-s786r" event={"ID":"8c33cd39-6c33-4bd9-80aa-819849282664","Type":"ContainerStarted","Data":"e6aed91400298b4b39ab8e76850bdbaabebf87d7b3564b39f5b8b72c25ae8b86"} Feb 27 16:34:03 crc kubenswrapper[4751]: I0227 16:34:03.074386 4751 generic.go:334] "Generic (PLEG): container finished" podID="8c33cd39-6c33-4bd9-80aa-819849282664" containerID="9f78361b0884090d3f79935b157812fcb5c1a4975336aed5d96d022d5f80dac6" exitCode=0 Feb 27 16:34:03 crc kubenswrapper[4751]: I0227 16:34:03.074577 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536834-s786r" event={"ID":"8c33cd39-6c33-4bd9-80aa-819849282664","Type":"ContainerDied","Data":"9f78361b0884090d3f79935b157812fcb5c1a4975336aed5d96d022d5f80dac6"} Feb 27 16:34:04 crc kubenswrapper[4751]: I0227 16:34:04.369660 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536834-s786r" Feb 27 16:34:04 crc kubenswrapper[4751]: I0227 16:34:04.478200 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s9lz7\" (UniqueName: \"kubernetes.io/projected/8c33cd39-6c33-4bd9-80aa-819849282664-kube-api-access-s9lz7\") pod \"8c33cd39-6c33-4bd9-80aa-819849282664\" (UID: \"8c33cd39-6c33-4bd9-80aa-819849282664\") " Feb 27 16:34:04 crc kubenswrapper[4751]: I0227 16:34:04.490558 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c33cd39-6c33-4bd9-80aa-819849282664-kube-api-access-s9lz7" (OuterVolumeSpecName: "kube-api-access-s9lz7") pod "8c33cd39-6c33-4bd9-80aa-819849282664" (UID: "8c33cd39-6c33-4bd9-80aa-819849282664"). InnerVolumeSpecName "kube-api-access-s9lz7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:34:04 crc kubenswrapper[4751]: I0227 16:34:04.580859 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s9lz7\" (UniqueName: \"kubernetes.io/projected/8c33cd39-6c33-4bd9-80aa-819849282664-kube-api-access-s9lz7\") on node \"crc\" DevicePath \"\"" Feb 27 16:34:05 crc kubenswrapper[4751]: I0227 16:34:05.092221 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536834-s786r" event={"ID":"8c33cd39-6c33-4bd9-80aa-819849282664","Type":"ContainerDied","Data":"e6aed91400298b4b39ab8e76850bdbaabebf87d7b3564b39f5b8b72c25ae8b86"} Feb 27 16:34:05 crc kubenswrapper[4751]: I0227 16:34:05.092272 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536834-s786r" Feb 27 16:34:05 crc kubenswrapper[4751]: I0227 16:34:05.092280 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e6aed91400298b4b39ab8e76850bdbaabebf87d7b3564b39f5b8b72c25ae8b86" Feb 27 16:34:05 crc kubenswrapper[4751]: I0227 16:34:05.441298 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536828-jvl6d"] Feb 27 16:34:05 crc kubenswrapper[4751]: I0227 16:34:05.447856 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536828-jvl6d"] Feb 27 16:34:06 crc kubenswrapper[4751]: I0227 16:34:06.532560 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b8fa1bb-3fd3-4b7a-b94d-800ffcb15b2d" path="/var/lib/kubelet/pods/9b8fa1bb-3fd3-4b7a-b94d-800ffcb15b2d/volumes" Feb 27 16:34:22 crc kubenswrapper[4751]: I0227 16:34:22.995978 4751 scope.go:117] "RemoveContainer" containerID="fe924e01ba8e552fc1f993d5fc0a42e34974911c56b846c9015377650c69ed65" Feb 27 16:34:58 crc kubenswrapper[4751]: I0227 16:34:58.918753 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 16:34:58 crc kubenswrapper[4751]: I0227 16:34:58.921867 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:35:23 crc kubenswrapper[4751]: I0227 16:35:23.038102 4751 scope.go:117] "RemoveContainer" containerID="1f171047e55a972a58277c75a89d5a5c01ed580dd49df219e0ddf72536ee15e2" Feb 27 16:35:23 crc kubenswrapper[4751]: I0227 16:35:23.066028 4751 scope.go:117] "RemoveContainer" containerID="12487aad43858aa1a0f16a992d3cafe9baa676f8aab31655ccbb907cb250a2e3" Feb 27 16:35:23 crc kubenswrapper[4751]: I0227 16:35:23.113244 4751 scope.go:117] "RemoveContainer" containerID="0a159ba30f7fbe4dd02231bb3ba0d1e8d73a807b15e69daabdbd8f1e250283d8" Feb 27 16:35:23 crc kubenswrapper[4751]: I0227 16:35:23.159686 4751 scope.go:117] "RemoveContainer" containerID="bac6c8105c744a581e3881e4f2d5bea4ce649ac6feb6da7ea0befd69a9ea4c32" Feb 27 16:35:23 crc kubenswrapper[4751]: I0227 16:35:23.179602 4751 scope.go:117] "RemoveContainer" containerID="d447c09062902c914e9ac4fc2a58c1393094af8fb4c73c6a9426d45aa8ee066e" Feb 27 16:35:28 crc kubenswrapper[4751]: I0227 16:35:28.918906 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 16:35:28 crc kubenswrapper[4751]: I0227 16:35:28.919367 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:35:58 crc kubenswrapper[4751]: I0227 16:35:58.918540 4751 patch_prober.go:28] 
interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 16:35:58 crc kubenswrapper[4751]: I0227 16:35:58.919260 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:35:58 crc kubenswrapper[4751]: I0227 16:35:58.919324 4751 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 16:35:58 crc kubenswrapper[4751]: I0227 16:35:58.920134 4751 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ffc9c7cd2c6fc8ac46b1b7d570ff4efe965dd019a548ff518b61e9ff634572af"} pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 27 16:35:58 crc kubenswrapper[4751]: I0227 16:35:58.920210 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" containerID="cri-o://ffc9c7cd2c6fc8ac46b1b7d570ff4efe965dd019a548ff518b61e9ff634572af" gracePeriod=600 Feb 27 16:35:59 crc kubenswrapper[4751]: I0227 16:35:59.065670 4751 generic.go:334] "Generic (PLEG): container finished" podID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerID="ffc9c7cd2c6fc8ac46b1b7d570ff4efe965dd019a548ff518b61e9ff634572af" exitCode=0 Feb 27 16:35:59 crc kubenswrapper[4751]: I0227 16:35:59.065724 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerDied","Data":"ffc9c7cd2c6fc8ac46b1b7d570ff4efe965dd019a548ff518b61e9ff634572af"} Feb 27 16:35:59 crc kubenswrapper[4751]: I0227 16:35:59.065794 4751 scope.go:117] "RemoveContainer" containerID="0a576cb428bc8c6c7aa4cc0e1673e4d3aa049a82e2ef3e79581b883ffcfca488" Feb 27 16:36:00 crc kubenswrapper[4751]: I0227 16:36:00.075524 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerStarted","Data":"6eb163b225e8b4061c0a49276f7f1481358603b35f8794f8c9ade9058836265d"} Feb 27 16:36:00 crc kubenswrapper[4751]: I0227 16:36:00.145523 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536836-txd2x"] Feb 27 16:36:00 crc kubenswrapper[4751]: E0227 16:36:00.145867 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c33cd39-6c33-4bd9-80aa-819849282664" containerName="oc" Feb 27 16:36:00 crc kubenswrapper[4751]: I0227 16:36:00.145895 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c33cd39-6c33-4bd9-80aa-819849282664" containerName="oc" Feb 27 16:36:00 crc kubenswrapper[4751]: I0227 16:36:00.146093 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c33cd39-6c33-4bd9-80aa-819849282664" containerName="oc" Feb 27 16:36:00 crc 
kubenswrapper[4751]: I0227 16:36:00.146744 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536836-txd2x" Feb 27 16:36:00 crc kubenswrapper[4751]: I0227 16:36:00.149841 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 16:36:00 crc kubenswrapper[4751]: I0227 16:36:00.150224 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 16:36:00 crc kubenswrapper[4751]: I0227 16:36:00.151183 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 16:36:00 crc kubenswrapper[4751]: I0227 16:36:00.153056 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536836-txd2x"] Feb 27 16:36:00 crc kubenswrapper[4751]: I0227 16:36:00.199760 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79bbw\" (UniqueName: \"kubernetes.io/projected/837698d3-c1f7-4873-b909-5fbab8c45f05-kube-api-access-79bbw\") pod \"auto-csr-approver-29536836-txd2x\" (UID: \"837698d3-c1f7-4873-b909-5fbab8c45f05\") " pod="openshift-infra/auto-csr-approver-29536836-txd2x" Feb 27 16:36:00 crc kubenswrapper[4751]: I0227 16:36:00.301842 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79bbw\" (UniqueName: \"kubernetes.io/projected/837698d3-c1f7-4873-b909-5fbab8c45f05-kube-api-access-79bbw\") pod \"auto-csr-approver-29536836-txd2x\" (UID: \"837698d3-c1f7-4873-b909-5fbab8c45f05\") " pod="openshift-infra/auto-csr-approver-29536836-txd2x" Feb 27 16:36:00 crc kubenswrapper[4751]: I0227 16:36:00.329549 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79bbw\" (UniqueName: \"kubernetes.io/projected/837698d3-c1f7-4873-b909-5fbab8c45f05-kube-api-access-79bbw\") pod \"auto-csr-approver-29536836-txd2x\" (UID: \"837698d3-c1f7-4873-b909-5fbab8c45f05\") " pod="openshift-infra/auto-csr-approver-29536836-txd2x" Feb 27 16:36:00 crc kubenswrapper[4751]: I0227 16:36:00.464229 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536836-txd2x" Feb 27 16:36:00 crc kubenswrapper[4751]: I0227 16:36:00.715511 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536836-txd2x"] Feb 27 16:36:01 crc kubenswrapper[4751]: I0227 16:36:01.086366 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536836-txd2x" event={"ID":"837698d3-c1f7-4873-b909-5fbab8c45f05","Type":"ContainerStarted","Data":"0094be894246202399f1d91ec858c60c22337145c8b47937777da7dd7896668b"} Feb 27 16:36:02 crc kubenswrapper[4751]: I0227 16:36:02.095682 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536836-txd2x" event={"ID":"837698d3-c1f7-4873-b909-5fbab8c45f05","Type":"ContainerStarted","Data":"0788dc7b2d935a2b967b3f651b7d5955608b2c950c534d0711fa151db94aae07"} Feb 27 16:36:02 crc kubenswrapper[4751]: I0227 16:36:02.118314 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-infra/auto-csr-approver-29536836-txd2x" podStartSLOduration=1.082199843 podStartE2EDuration="2.11826456s" podCreationTimestamp="2026-02-27 16:36:00 +0000 UTC" firstStartedPulling="2026-02-27 16:36:00.73666836 +0000 UTC m=+722.883682807" lastFinishedPulling="2026-02-27 16:36:01.772733077 +0000 UTC m=+723.919747524" observedRunningTime="2026-02-27 16:36:02.113572665 +0000 UTC m=+724.260587152" watchObservedRunningTime="2026-02-27 16:36:02.11826456 +0000 UTC m=+724.265279037" Feb 27 16:36:03 crc kubenswrapper[4751]: I0227 16:36:03.103735 4751 generic.go:334] "Generic (PLEG): container finished" podID="837698d3-c1f7-4873-b909-5fbab8c45f05" containerID="0788dc7b2d935a2b967b3f651b7d5955608b2c950c534d0711fa151db94aae07" exitCode=0 Feb 27 16:36:03 crc kubenswrapper[4751]: I0227 16:36:03.103843 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536836-txd2x" event={"ID":"837698d3-c1f7-4873-b909-5fbab8c45f05","Type":"ContainerDied","Data":"0788dc7b2d935a2b967b3f651b7d5955608b2c950c534d0711fa151db94aae07"} Feb 27 16:36:04 crc kubenswrapper[4751]: I0227 16:36:04.387378 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536836-txd2x" Feb 27 16:36:04 crc kubenswrapper[4751]: I0227 16:36:04.480339 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-79bbw\" (UniqueName: \"kubernetes.io/projected/837698d3-c1f7-4873-b909-5fbab8c45f05-kube-api-access-79bbw\") pod \"837698d3-c1f7-4873-b909-5fbab8c45f05\" (UID: \"837698d3-c1f7-4873-b909-5fbab8c45f05\") " Feb 27 16:36:04 crc kubenswrapper[4751]: I0227 16:36:04.488315 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/837698d3-c1f7-4873-b909-5fbab8c45f05-kube-api-access-79bbw" (OuterVolumeSpecName: "kube-api-access-79bbw") pod "837698d3-c1f7-4873-b909-5fbab8c45f05" (UID: "837698d3-c1f7-4873-b909-5fbab8c45f05"). InnerVolumeSpecName "kube-api-access-79bbw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:36:04 crc kubenswrapper[4751]: I0227 16:36:04.582666 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-79bbw\" (UniqueName: \"kubernetes.io/projected/837698d3-c1f7-4873-b909-5fbab8c45f05-kube-api-access-79bbw\") on node \"crc\" DevicePath \"\"" Feb 27 16:36:05 crc kubenswrapper[4751]: I0227 16:36:05.126078 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536836-txd2x" event={"ID":"837698d3-c1f7-4873-b909-5fbab8c45f05","Type":"ContainerDied","Data":"0094be894246202399f1d91ec858c60c22337145c8b47937777da7dd7896668b"} Feb 27 16:36:05 crc kubenswrapper[4751]: I0227 16:36:05.126579 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0094be894246202399f1d91ec858c60c22337145c8b47937777da7dd7896668b" Feb 27 16:36:05 crc kubenswrapper[4751]: I0227 16:36:05.126147 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536836-txd2x" Feb 27 16:36:05 crc kubenswrapper[4751]: I0227 16:36:05.202697 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536830-lp72m"] Feb 27 16:36:05 crc kubenswrapper[4751]: I0227 16:36:05.211114 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536830-lp72m"] Feb 27 16:36:06 crc kubenswrapper[4751]: I0227 16:36:06.537206 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00cac46b-1c35-43d3-82ba-777e5ebb11c4" path="/var/lib/kubelet/pods/00cac46b-1c35-43d3-82ba-777e5ebb11c4/volumes" Feb 27 16:36:23 crc kubenswrapper[4751]: I0227 16:36:23.265593 4751 scope.go:117] "RemoveContainer" containerID="06d49298e2ca94c43982065aaa20ab384a489973a7724f8b8a5cc8ecb6cd953d" Feb 27 16:37:47 crc kubenswrapper[4751]: I0227 16:37:47.440035 4751 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Feb 27 16:38:00 crc kubenswrapper[4751]: I0227 16:38:00.153606 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536838-qbq2z"] Feb 27 16:38:00 crc kubenswrapper[4751]: E0227 16:38:00.154820 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="837698d3-c1f7-4873-b909-5fbab8c45f05" containerName="oc" Feb 27 16:38:00 crc kubenswrapper[4751]: I0227 16:38:00.154927 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="837698d3-c1f7-4873-b909-5fbab8c45f05" containerName="oc" Feb 27 16:38:00 crc kubenswrapper[4751]: I0227 16:38:00.155181 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="837698d3-c1f7-4873-b909-5fbab8c45f05" containerName="oc" Feb 27 16:38:00 crc kubenswrapper[4751]: I0227 16:38:00.156038 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536838-qbq2z" Feb 27 16:38:00 crc kubenswrapper[4751]: I0227 16:38:00.168075 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 16:38:00 crc kubenswrapper[4751]: I0227 16:38:00.168356 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 16:38:00 crc kubenswrapper[4751]: I0227 16:38:00.169737 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 16:38:00 crc kubenswrapper[4751]: I0227 16:38:00.170335 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536838-qbq2z"] Feb 27 16:38:00 crc kubenswrapper[4751]: I0227 16:38:00.291911 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btgrk\" (UniqueName: \"kubernetes.io/projected/32fbc6ca-5072-482c-bd83-c773f3add3fa-kube-api-access-btgrk\") pod \"auto-csr-approver-29536838-qbq2z\" (UID: \"32fbc6ca-5072-482c-bd83-c773f3add3fa\") " pod="openshift-infra/auto-csr-approver-29536838-qbq2z" Feb 27 16:38:00 crc kubenswrapper[4751]: I0227 16:38:00.392989 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btgrk\" (UniqueName: \"kubernetes.io/projected/32fbc6ca-5072-482c-bd83-c773f3add3fa-kube-api-access-btgrk\") pod \"auto-csr-approver-29536838-qbq2z\" (UID: \"32fbc6ca-5072-482c-bd83-c773f3add3fa\") " pod="openshift-infra/auto-csr-approver-29536838-qbq2z" Feb 27 16:38:00 crc kubenswrapper[4751]: I0227 16:38:00.427151 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btgrk\" (UniqueName: \"kubernetes.io/projected/32fbc6ca-5072-482c-bd83-c773f3add3fa-kube-api-access-btgrk\") pod \"auto-csr-approver-29536838-qbq2z\" (UID: \"32fbc6ca-5072-482c-bd83-c773f3add3fa\") " pod="openshift-infra/auto-csr-approver-29536838-qbq2z" Feb 27 16:38:00 crc kubenswrapper[4751]: I0227 16:38:00.484283 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536838-qbq2z" Feb 27 16:38:00 crc kubenswrapper[4751]: I0227 16:38:00.801231 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536838-qbq2z"] Feb 27 16:38:00 crc kubenswrapper[4751]: W0227 16:38:00.809269 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod32fbc6ca_5072_482c_bd83_c773f3add3fa.slice/crio-45a42d0f1e8ec316be2daef506bdab110ba943661175991a3e359662507bc08b WatchSource:0}: Error finding container 45a42d0f1e8ec316be2daef506bdab110ba943661175991a3e359662507bc08b: Status 404 returned error can't find the container with id 45a42d0f1e8ec316be2daef506bdab110ba943661175991a3e359662507bc08b Feb 27 16:38:01 crc kubenswrapper[4751]: I0227 16:38:01.288179 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536838-qbq2z" event={"ID":"32fbc6ca-5072-482c-bd83-c773f3add3fa","Type":"ContainerStarted","Data":"45a42d0f1e8ec316be2daef506bdab110ba943661175991a3e359662507bc08b"} Feb 27 16:38:02 crc kubenswrapper[4751]: I0227 16:38:02.300510 4751 generic.go:334] "Generic (PLEG): container finished" podID="32fbc6ca-5072-482c-bd83-c773f3add3fa" containerID="61f234fc875974581fc093b56a1b026814efff2b98cc42afb332384821bd7487" exitCode=0 Feb 27 16:38:02 crc kubenswrapper[4751]: I0227 16:38:02.300583 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536838-qbq2z" event={"ID":"32fbc6ca-5072-482c-bd83-c773f3add3fa","Type":"ContainerDied","Data":"61f234fc875974581fc093b56a1b026814efff2b98cc42afb332384821bd7487"} Feb 27 16:38:03 crc kubenswrapper[4751]: I0227 16:38:03.589768 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536838-qbq2z" Feb 27 16:38:03 crc kubenswrapper[4751]: I0227 16:38:03.747561 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-btgrk\" (UniqueName: \"kubernetes.io/projected/32fbc6ca-5072-482c-bd83-c773f3add3fa-kube-api-access-btgrk\") pod \"32fbc6ca-5072-482c-bd83-c773f3add3fa\" (UID: \"32fbc6ca-5072-482c-bd83-c773f3add3fa\") " Feb 27 16:38:03 crc kubenswrapper[4751]: I0227 16:38:03.753956 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32fbc6ca-5072-482c-bd83-c773f3add3fa-kube-api-access-btgrk" (OuterVolumeSpecName: "kube-api-access-btgrk") pod "32fbc6ca-5072-482c-bd83-c773f3add3fa" (UID: "32fbc6ca-5072-482c-bd83-c773f3add3fa"). InnerVolumeSpecName "kube-api-access-btgrk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:38:03 crc kubenswrapper[4751]: I0227 16:38:03.849659 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-btgrk\" (UniqueName: \"kubernetes.io/projected/32fbc6ca-5072-482c-bd83-c773f3add3fa-kube-api-access-btgrk\") on node \"crc\" DevicePath \"\"" Feb 27 16:38:04 crc kubenswrapper[4751]: I0227 16:38:04.323269 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536838-qbq2z" event={"ID":"32fbc6ca-5072-482c-bd83-c773f3add3fa","Type":"ContainerDied","Data":"45a42d0f1e8ec316be2daef506bdab110ba943661175991a3e359662507bc08b"} Feb 27 16:38:04 crc kubenswrapper[4751]: I0227 16:38:04.323414 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="45a42d0f1e8ec316be2daef506bdab110ba943661175991a3e359662507bc08b" Feb 27 16:38:04 crc kubenswrapper[4751]: I0227 16:38:04.323463 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536838-qbq2z" Feb 27 16:38:04 crc kubenswrapper[4751]: I0227 16:38:04.656114 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536832-lhd8z"] Feb 27 16:38:04 crc kubenswrapper[4751]: I0227 16:38:04.665681 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536832-lhd8z"] Feb 27 16:38:06 crc kubenswrapper[4751]: I0227 16:38:06.534111 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a52fb7c4-bd18-4923-a287-bd38d5dfa546" path="/var/lib/kubelet/pods/a52fb7c4-bd18-4923-a287-bd38d5dfa546/volumes" Feb 27 16:38:23 crc kubenswrapper[4751]: I0227 16:38:23.372836 4751 scope.go:117] "RemoveContainer" containerID="a2be35fb931ad32d4203d5790923f1927698671435a08b8e1b56d308b968e625" Feb 27 16:38:28 crc kubenswrapper[4751]: I0227 16:38:28.917847 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 16:38:28 crc kubenswrapper[4751]: I0227 16:38:28.919138 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:38:53 crc kubenswrapper[4751]: I0227 16:38:53.805189 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-vpxjd"] Feb 27 16:38:53 crc kubenswrapper[4751]: I0227 16:38:53.806658 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovn-controller" containerID="cri-o://58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082" gracePeriod=30 Feb 27 16:38:53 crc kubenswrapper[4751]: I0227 16:38:53.806750 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354" gracePeriod=30 Feb 27 16:38:53 crc 
kubenswrapper[4751]: I0227 16:38:53.806772 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="kube-rbac-proxy-node" containerID="cri-o://684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028" gracePeriod=30 Feb 27 16:38:53 crc kubenswrapper[4751]: I0227 16:38:53.806842 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="northd" containerID="cri-o://c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d" gracePeriod=30 Feb 27 16:38:53 crc kubenswrapper[4751]: I0227 16:38:53.806887 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovn-acl-logging" containerID="cri-o://787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948" gracePeriod=30 Feb 27 16:38:53 crc kubenswrapper[4751]: I0227 16:38:53.806821 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="sbdb" containerID="cri-o://f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b" gracePeriod=30 Feb 27 16:38:53 crc kubenswrapper[4751]: I0227 16:38:53.806751 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="nbdb" containerID="cri-o://410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc" gracePeriod=30 Feb 27 16:38:53 crc kubenswrapper[4751]: I0227 16:38:53.855058 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovnkube-controller" containerID="cri-o://60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f" gracePeriod=30 Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.155314 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vpxjd_45a3f89b-11cb-4336-962d-c6835c5f758e/ovnkube-controller/3.log" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.157513 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vpxjd_45a3f89b-11cb-4336-962d-c6835c5f758e/ovn-acl-logging/0.log" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.158047 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vpxjd_45a3f89b-11cb-4336-962d-c6835c5f758e/ovn-controller/0.log" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.158484 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.180237 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-run-systemd\") pod \"45a3f89b-11cb-4336-962d-c6835c5f758e\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.180639 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-run-netns\") pod \"45a3f89b-11cb-4336-962d-c6835c5f758e\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.180720 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-cni-bin\") pod \"45a3f89b-11cb-4336-962d-c6835c5f758e\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.180742 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-run-openvswitch\") pod \"45a3f89b-11cb-4336-962d-c6835c5f758e\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.180758 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-node-log\") pod \"45a3f89b-11cb-4336-962d-c6835c5f758e\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.180794 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-log-socket\") pod \"45a3f89b-11cb-4336-962d-c6835c5f758e\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.180819 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"45a3f89b-11cb-4336-962d-c6835c5f758e\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.180864 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-run-ovn-kubernetes\") pod \"45a3f89b-11cb-4336-962d-c6835c5f758e\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.180886 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-slash\") pod \"45a3f89b-11cb-4336-962d-c6835c5f758e\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.180911 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: 
\"kubernetes.io/configmap/45a3f89b-11cb-4336-962d-c6835c5f758e-ovnkube-script-lib\") pod \"45a3f89b-11cb-4336-962d-c6835c5f758e\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.180957 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-systemd-units\") pod \"45a3f89b-11cb-4336-962d-c6835c5f758e\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.180984 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-cni-netd\") pod \"45a3f89b-11cb-4336-962d-c6835c5f758e\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.181002 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-etc-openvswitch\") pod \"45a3f89b-11cb-4336-962d-c6835c5f758e\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.181043 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-run-ovn\") pod \"45a3f89b-11cb-4336-962d-c6835c5f758e\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.181078 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/45a3f89b-11cb-4336-962d-c6835c5f758e-ovn-node-metrics-cert\") pod \"45a3f89b-11cb-4336-962d-c6835c5f758e\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.181119 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/45a3f89b-11cb-4336-962d-c6835c5f758e-ovnkube-config\") pod \"45a3f89b-11cb-4336-962d-c6835c5f758e\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.181138 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/45a3f89b-11cb-4336-962d-c6835c5f758e-env-overrides\") pod \"45a3f89b-11cb-4336-962d-c6835c5f758e\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.181154 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-kubelet\") pod \"45a3f89b-11cb-4336-962d-c6835c5f758e\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.181200 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-var-lib-openvswitch\") pod \"45a3f89b-11cb-4336-962d-c6835c5f758e\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.181222 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nm8jw\" (UniqueName: 
\"kubernetes.io/projected/45a3f89b-11cb-4336-962d-c6835c5f758e-kube-api-access-nm8jw\") pod \"45a3f89b-11cb-4336-962d-c6835c5f758e\" (UID: \"45a3f89b-11cb-4336-962d-c6835c5f758e\") " Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.181917 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45a3f89b-11cb-4336-962d-c6835c5f758e-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "45a3f89b-11cb-4336-962d-c6835c5f758e" (UID: "45a3f89b-11cb-4336-962d-c6835c5f758e"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.181984 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "45a3f89b-11cb-4336-962d-c6835c5f758e" (UID: "45a3f89b-11cb-4336-962d-c6835c5f758e"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.182033 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "45a3f89b-11cb-4336-962d-c6835c5f758e" (UID: "45a3f89b-11cb-4336-962d-c6835c5f758e"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.182057 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "45a3f89b-11cb-4336-962d-c6835c5f758e" (UID: "45a3f89b-11cb-4336-962d-c6835c5f758e"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.182078 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-node-log" (OuterVolumeSpecName: "node-log") pod "45a3f89b-11cb-4336-962d-c6835c5f758e" (UID: "45a3f89b-11cb-4336-962d-c6835c5f758e"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.182122 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-log-socket" (OuterVolumeSpecName: "log-socket") pod "45a3f89b-11cb-4336-962d-c6835c5f758e" (UID: "45a3f89b-11cb-4336-962d-c6835c5f758e"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.182140 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "45a3f89b-11cb-4336-962d-c6835c5f758e" (UID: "45a3f89b-11cb-4336-962d-c6835c5f758e"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.182155 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "45a3f89b-11cb-4336-962d-c6835c5f758e" (UID: "45a3f89b-11cb-4336-962d-c6835c5f758e"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.182193 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-slash" (OuterVolumeSpecName: "host-slash") pod "45a3f89b-11cb-4336-962d-c6835c5f758e" (UID: "45a3f89b-11cb-4336-962d-c6835c5f758e"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.182518 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "45a3f89b-11cb-4336-962d-c6835c5f758e" (UID: "45a3f89b-11cb-4336-962d-c6835c5f758e"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.182571 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "45a3f89b-11cb-4336-962d-c6835c5f758e" (UID: "45a3f89b-11cb-4336-962d-c6835c5f758e"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.182620 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "45a3f89b-11cb-4336-962d-c6835c5f758e" (UID: "45a3f89b-11cb-4336-962d-c6835c5f758e"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.182673 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "45a3f89b-11cb-4336-962d-c6835c5f758e" (UID: "45a3f89b-11cb-4336-962d-c6835c5f758e"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.182953 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45a3f89b-11cb-4336-962d-c6835c5f758e-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "45a3f89b-11cb-4336-962d-c6835c5f758e" (UID: "45a3f89b-11cb-4336-962d-c6835c5f758e"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.183004 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "45a3f89b-11cb-4336-962d-c6835c5f758e" (UID: "45a3f89b-11cb-4336-962d-c6835c5f758e"). InnerVolumeSpecName "var-lib-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.183029 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "45a3f89b-11cb-4336-962d-c6835c5f758e" (UID: "45a3f89b-11cb-4336-962d-c6835c5f758e"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.183119 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45a3f89b-11cb-4336-962d-c6835c5f758e-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "45a3f89b-11cb-4336-962d-c6835c5f758e" (UID: "45a3f89b-11cb-4336-962d-c6835c5f758e"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.183222 4751 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.183270 4751 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/45a3f89b-11cb-4336-962d-c6835c5f758e-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.183283 4751 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-slash\") on node \"crc\" DevicePath \"\"" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.183291 4751 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-systemd-units\") on node \"crc\" DevicePath \"\"" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.183299 4751 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-cni-netd\") on node \"crc\" DevicePath \"\"" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.183328 4751 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.183337 4751 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-run-ovn\") on node \"crc\" DevicePath \"\"" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.183344 4751 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/45a3f89b-11cb-4336-962d-c6835c5f758e-ovnkube-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.183353 4751 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/45a3f89b-11cb-4336-962d-c6835c5f758e-env-overrides\") on node \"crc\" DevicePath \"\"" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.183361 4751 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: 
\"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-kubelet\") on node \"crc\" DevicePath \"\"" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.183370 4751 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.183384 4751 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-run-netns\") on node \"crc\" DevicePath \"\"" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.183463 4751 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-cni-bin\") on node \"crc\" DevicePath \"\"" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.183474 4751 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-run-openvswitch\") on node \"crc\" DevicePath \"\"" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.183484 4751 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-node-log\") on node \"crc\" DevicePath \"\"" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.183493 4751 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-log-socket\") on node \"crc\" DevicePath \"\"" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.183506 4751 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.186613 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45a3f89b-11cb-4336-962d-c6835c5f758e-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "45a3f89b-11cb-4336-962d-c6835c5f758e" (UID: "45a3f89b-11cb-4336-962d-c6835c5f758e"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.186971 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45a3f89b-11cb-4336-962d-c6835c5f758e-kube-api-access-nm8jw" (OuterVolumeSpecName: "kube-api-access-nm8jw") pod "45a3f89b-11cb-4336-962d-c6835c5f758e" (UID: "45a3f89b-11cb-4336-962d-c6835c5f758e"). InnerVolumeSpecName "kube-api-access-nm8jw". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.198064 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "45a3f89b-11cb-4336-962d-c6835c5f758e" (UID: "45a3f89b-11cb-4336-962d-c6835c5f758e"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.224312 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-jn79x"] Feb 27 16:38:54 crc kubenswrapper[4751]: E0227 16:38:54.224641 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovn-controller" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.224661 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovn-controller" Feb 27 16:38:54 crc kubenswrapper[4751]: E0227 16:38:54.224682 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovnkube-controller" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.224724 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovnkube-controller" Feb 27 16:38:54 crc kubenswrapper[4751]: E0227 16:38:54.224741 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="kube-rbac-proxy-node" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.224755 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="kube-rbac-proxy-node" Feb 27 16:38:54 crc kubenswrapper[4751]: E0227 16:38:54.224779 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovn-acl-logging" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.224793 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovn-acl-logging" Feb 27 16:38:54 crc kubenswrapper[4751]: E0227 16:38:54.224814 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="kube-rbac-proxy-ovn-metrics" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.224827 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="kube-rbac-proxy-ovn-metrics" Feb 27 16:38:54 crc kubenswrapper[4751]: E0227 16:38:54.224843 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovnkube-controller" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.224855 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovnkube-controller" Feb 27 16:38:54 crc kubenswrapper[4751]: E0227 16:38:54.224869 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovnkube-controller" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.224883 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovnkube-controller" Feb 27 16:38:54 crc kubenswrapper[4751]: E0227 16:38:54.224900 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="northd" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.224913 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="northd" Feb 27 16:38:54 crc kubenswrapper[4751]: E0227 16:38:54.224933 4751 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="nbdb" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.224945 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="nbdb" Feb 27 16:38:54 crc kubenswrapper[4751]: E0227 16:38:54.224976 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="kubecfg-setup" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.224992 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="kubecfg-setup" Feb 27 16:38:54 crc kubenswrapper[4751]: E0227 16:38:54.225016 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="sbdb" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.225034 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="sbdb" Feb 27 16:38:54 crc kubenswrapper[4751]: E0227 16:38:54.225057 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovnkube-controller" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.225070 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovnkube-controller" Feb 27 16:38:54 crc kubenswrapper[4751]: E0227 16:38:54.225084 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32fbc6ca-5072-482c-bd83-c773f3add3fa" containerName="oc" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.225097 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="32fbc6ca-5072-482c-bd83-c773f3add3fa" containerName="oc" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.225276 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="nbdb" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.225301 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovnkube-controller" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.225317 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovn-controller" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.225328 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="northd" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.225347 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovn-acl-logging" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.225369 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="kube-rbac-proxy-ovn-metrics" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.225390 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="32fbc6ca-5072-482c-bd83-c773f3add3fa" containerName="oc" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.225450 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovnkube-controller" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.225469 4751 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovnkube-controller" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.225487 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovnkube-controller" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.225508 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="sbdb" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.225521 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="kube-rbac-proxy-node" Feb 27 16:38:54 crc kubenswrapper[4751]: E0227 16:38:54.226190 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovnkube-controller" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.226213 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovnkube-controller" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.226382 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerName="ovnkube-controller" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.229140 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.284335 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ghck\" (UniqueName: \"kubernetes.io/projected/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-kube-api-access-9ghck\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.284419 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-run-openvswitch\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.284448 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-ovnkube-script-lib\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.284476 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-run-ovn\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.284503 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-systemd-units\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 
16:38:54.284531 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-host-run-ovn-kubernetes\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.284552 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-ovnkube-config\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.284580 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-etc-openvswitch\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.284602 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-host-cni-bin\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.284627 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-env-overrides\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.284650 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-node-log\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.284672 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-log-socket\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.284713 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-host-kubelet\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.284771 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-var-lib-openvswitch\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.284792 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-host-run-netns\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.285213 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-host-slash\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.285260 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-run-systemd\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.285312 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.285338 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-ovn-node-metrics-cert\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.285576 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-host-cni-netd\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.286424 4751 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/45a3f89b-11cb-4336-962d-c6835c5f758e-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.286448 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nm8jw\" (UniqueName: \"kubernetes.io/projected/45a3f89b-11cb-4336-962d-c6835c5f758e-kube-api-access-nm8jw\") on node \"crc\" DevicePath \"\"" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.286465 4751 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/45a3f89b-11cb-4336-962d-c6835c5f758e-run-systemd\") on node \"crc\" DevicePath \"\"" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.387788 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-etc-openvswitch\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.387829 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-host-cni-bin\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.387846 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-env-overrides\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.387863 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-node-log\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.387941 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-etc-openvswitch\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.387959 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-host-cni-bin\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.387970 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-node-log\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.388073 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-log-socket\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.388503 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-env-overrides\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.387877 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-log-socket\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.388639 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-host-kubelet\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.388695 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-var-lib-openvswitch\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.388749 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-host-run-netns\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.388807 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-var-lib-openvswitch\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.388841 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-host-slash\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.388892 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-run-systemd\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.388922 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-run-systemd\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.388902 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-host-slash\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.388940 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-host-run-netns\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.388956 4751 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-ovn-node-metrics-cert\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.388846 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-host-kubelet\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.389002 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.389047 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-host-cni-netd\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.389061 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.389075 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ghck\" (UniqueName: \"kubernetes.io/projected/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-kube-api-access-9ghck\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.389133 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-host-cni-netd\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.389157 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-run-openvswitch\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.389206 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-ovnkube-script-lib\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 
16:38:54.389229 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-run-openvswitch\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.389260 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-run-ovn\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.389311 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-systemd-units\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.389374 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-host-run-ovn-kubernetes\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.389424 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-run-ovn\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.389462 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-systemd-units\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.389458 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-ovnkube-config\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.389530 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-host-run-ovn-kubernetes\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.389843 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-ovnkube-script-lib\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.390529 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: 
\"kubernetes.io/configmap/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-ovnkube-config\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.395091 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-ovn-node-metrics-cert\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.412849 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ghck\" (UniqueName: \"kubernetes.io/projected/9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6-kube-api-access-9ghck\") pod \"ovnkube-node-jn79x\" (UID: \"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.547496 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.718046 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vpxjd_45a3f89b-11cb-4336-962d-c6835c5f758e/ovnkube-controller/3.log" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.721616 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vpxjd_45a3f89b-11cb-4336-962d-c6835c5f758e/ovn-acl-logging/0.log" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.722239 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vpxjd_45a3f89b-11cb-4336-962d-c6835c5f758e/ovn-controller/0.log" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.722785 4751 generic.go:334] "Generic (PLEG): container finished" podID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerID="60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f" exitCode=0 Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.722822 4751 generic.go:334] "Generic (PLEG): container finished" podID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerID="f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b" exitCode=0 Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.722838 4751 generic.go:334] "Generic (PLEG): container finished" podID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerID="410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc" exitCode=0 Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.722851 4751 generic.go:334] "Generic (PLEG): container finished" podID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerID="c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d" exitCode=0 Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.722864 4751 generic.go:334] "Generic (PLEG): container finished" podID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerID="a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354" exitCode=0 Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.722875 4751 generic.go:334] "Generic (PLEG): container finished" podID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerID="684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028" exitCode=0 Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.722887 4751 generic.go:334] "Generic (PLEG): container finished" 
podID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerID="787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948" exitCode=143 Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.722896 4751 generic.go:334] "Generic (PLEG): container finished" podID="45a3f89b-11cb-4336-962d-c6835c5f758e" containerID="58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082" exitCode=143 Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.722932 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.722953 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerDied","Data":"60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.722986 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerDied","Data":"f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723004 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerDied","Data":"410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723021 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerDied","Data":"c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723033 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerDied","Data":"a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723053 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerDied","Data":"684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723066 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723078 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723086 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723094 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723102 
4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723110 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723117 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723125 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723132 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723142 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerDied","Data":"787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723152 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723160 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723167 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723174 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723180 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723187 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723194 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723202 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723208 
4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723215 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723224 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerDied","Data":"58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723235 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723243 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723254 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723261 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723269 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723276 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723283 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723292 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723300 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723308 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723317 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vpxjd" 
event={"ID":"45a3f89b-11cb-4336-962d-c6835c5f758e","Type":"ContainerDied","Data":"00077b69e6e8ec8364485f5a7131671543403b6040e8e2504d162bbcdceaf0bc"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723328 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723337 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723344 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723352 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723360 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723367 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723376 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723383 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723390 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723422 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.723445 4751 scope.go:117] "RemoveContainer" containerID="60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.726657 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4jc4n_dc07559e-a5c7-458c-b3ec-646981b798c1/kube-multus/2.log" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.727203 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4jc4n_dc07559e-a5c7-458c-b3ec-646981b798c1/kube-multus/1.log" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.727255 4751 generic.go:334] "Generic (PLEG): container finished" podID="dc07559e-a5c7-458c-b3ec-646981b798c1" containerID="078030dbb5992de0c1bc4d1619c873a920f892cb1faa8ac404dbc20bb29ea6b8" exitCode=2 Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 
16:38:54.727323 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4jc4n" event={"ID":"dc07559e-a5c7-458c-b3ec-646981b798c1","Type":"ContainerDied","Data":"078030dbb5992de0c1bc4d1619c873a920f892cb1faa8ac404dbc20bb29ea6b8"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.727352 4751 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1b78fbbbec971c56f84f987d09108ddcb21d1189a379396e1174678f4de8d0e6"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.727965 4751 scope.go:117] "RemoveContainer" containerID="078030dbb5992de0c1bc4d1619c873a920f892cb1faa8ac404dbc20bb29ea6b8" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.730736 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" event={"ID":"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6","Type":"ContainerStarted","Data":"7ee8a9933e4e8edd816dfac6a2a003ea4f02fc6f93021a6e179cc22f8245ccdf"} Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.782444 4751 scope.go:117] "RemoveContainer" containerID="d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.784063 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-vpxjd"] Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.787545 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-vpxjd"] Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.834601 4751 scope.go:117] "RemoveContainer" containerID="f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.857704 4751 scope.go:117] "RemoveContainer" containerID="410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.869430 4751 scope.go:117] "RemoveContainer" containerID="c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.880641 4751 scope.go:117] "RemoveContainer" containerID="a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.899861 4751 scope.go:117] "RemoveContainer" containerID="684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.915938 4751 scope.go:117] "RemoveContainer" containerID="787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.954437 4751 scope.go:117] "RemoveContainer" containerID="58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.971048 4751 scope.go:117] "RemoveContainer" containerID="3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.983307 4751 scope.go:117] "RemoveContainer" containerID="60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f" Feb 27 16:38:54 crc kubenswrapper[4751]: E0227 16:38:54.983811 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f\": container with ID starting with 60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f not found: ID does not exist" 
containerID="60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.983839 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f"} err="failed to get container status \"60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f\": rpc error: code = NotFound desc = could not find container \"60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f\": container with ID starting with 60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.983860 4751 scope.go:117] "RemoveContainer" containerID="d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e" Feb 27 16:38:54 crc kubenswrapper[4751]: E0227 16:38:54.984074 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e\": container with ID starting with d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e not found: ID does not exist" containerID="d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.984091 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e"} err="failed to get container status \"d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e\": rpc error: code = NotFound desc = could not find container \"d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e\": container with ID starting with d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.984105 4751 scope.go:117] "RemoveContainer" containerID="f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b" Feb 27 16:38:54 crc kubenswrapper[4751]: E0227 16:38:54.984492 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\": container with ID starting with f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b not found: ID does not exist" containerID="f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.984510 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b"} err="failed to get container status \"f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\": rpc error: code = NotFound desc = could not find container \"f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\": container with ID starting with f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.984523 4751 scope.go:117] "RemoveContainer" containerID="410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc" Feb 27 16:38:54 crc kubenswrapper[4751]: E0227 16:38:54.984792 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\": container with ID starting with 410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc not found: ID does not exist" containerID="410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.984812 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc"} err="failed to get container status \"410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\": rpc error: code = NotFound desc = could not find container \"410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\": container with ID starting with 410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.984830 4751 scope.go:117] "RemoveContainer" containerID="c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d" Feb 27 16:38:54 crc kubenswrapper[4751]: E0227 16:38:54.985034 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\": container with ID starting with c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d not found: ID does not exist" containerID="c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.985048 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d"} err="failed to get container status \"c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\": rpc error: code = NotFound desc = could not find container \"c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\": container with ID starting with c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.985061 4751 scope.go:117] "RemoveContainer" containerID="a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354" Feb 27 16:38:54 crc kubenswrapper[4751]: E0227 16:38:54.985287 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\": container with ID starting with a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354 not found: ID does not exist" containerID="a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.985301 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354"} err="failed to get container status \"a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\": rpc error: code = NotFound desc = could not find container \"a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\": container with ID starting with a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354 not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.985314 4751 scope.go:117] "RemoveContainer" containerID="684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028" Feb 27 16:38:54 crc 
kubenswrapper[4751]: E0227 16:38:54.985516 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\": container with ID starting with 684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028 not found: ID does not exist" containerID="684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.985535 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028"} err="failed to get container status \"684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\": rpc error: code = NotFound desc = could not find container \"684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\": container with ID starting with 684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028 not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.985546 4751 scope.go:117] "RemoveContainer" containerID="787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948" Feb 27 16:38:54 crc kubenswrapper[4751]: E0227 16:38:54.985710 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\": container with ID starting with 787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948 not found: ID does not exist" containerID="787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.985726 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948"} err="failed to get container status \"787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\": rpc error: code = NotFound desc = could not find container \"787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\": container with ID starting with 787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948 not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.985736 4751 scope.go:117] "RemoveContainer" containerID="58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082" Feb 27 16:38:54 crc kubenswrapper[4751]: E0227 16:38:54.985948 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\": container with ID starting with 58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082 not found: ID does not exist" containerID="58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.985962 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082"} err="failed to get container status \"58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\": rpc error: code = NotFound desc = could not find container \"58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\": container with ID starting with 58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082 not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: 
I0227 16:38:54.985973 4751 scope.go:117] "RemoveContainer" containerID="3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de" Feb 27 16:38:54 crc kubenswrapper[4751]: E0227 16:38:54.986221 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\": container with ID starting with 3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de not found: ID does not exist" containerID="3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.986237 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de"} err="failed to get container status \"3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\": rpc error: code = NotFound desc = could not find container \"3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\": container with ID starting with 3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.986249 4751 scope.go:117] "RemoveContainer" containerID="60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.986463 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f"} err="failed to get container status \"60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f\": rpc error: code = NotFound desc = could not find container \"60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f\": container with ID starting with 60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.986479 4751 scope.go:117] "RemoveContainer" containerID="d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.986677 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e"} err="failed to get container status \"d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e\": rpc error: code = NotFound desc = could not find container \"d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e\": container with ID starting with d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.986696 4751 scope.go:117] "RemoveContainer" containerID="f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.987160 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b"} err="failed to get container status \"f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\": rpc error: code = NotFound desc = could not find container \"f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\": container with ID starting with f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: 
I0227 16:38:54.987183 4751 scope.go:117] "RemoveContainer" containerID="410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.987371 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc"} err="failed to get container status \"410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\": rpc error: code = NotFound desc = could not find container \"410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\": container with ID starting with 410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.987392 4751 scope.go:117] "RemoveContainer" containerID="c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.987617 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d"} err="failed to get container status \"c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\": rpc error: code = NotFound desc = could not find container \"c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\": container with ID starting with c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.987637 4751 scope.go:117] "RemoveContainer" containerID="a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.987840 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354"} err="failed to get container status \"a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\": rpc error: code = NotFound desc = could not find container \"a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\": container with ID starting with a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354 not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.987862 4751 scope.go:117] "RemoveContainer" containerID="684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.988091 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028"} err="failed to get container status \"684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\": rpc error: code = NotFound desc = could not find container \"684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\": container with ID starting with 684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028 not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.988108 4751 scope.go:117] "RemoveContainer" containerID="787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.988326 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948"} err="failed to get container status 
\"787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\": rpc error: code = NotFound desc = could not find container \"787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\": container with ID starting with 787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948 not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.988348 4751 scope.go:117] "RemoveContainer" containerID="58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.988564 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082"} err="failed to get container status \"58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\": rpc error: code = NotFound desc = could not find container \"58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\": container with ID starting with 58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082 not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.988586 4751 scope.go:117] "RemoveContainer" containerID="3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.988795 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de"} err="failed to get container status \"3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\": rpc error: code = NotFound desc = could not find container \"3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\": container with ID starting with 3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.988810 4751 scope.go:117] "RemoveContainer" containerID="60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.989001 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f"} err="failed to get container status \"60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f\": rpc error: code = NotFound desc = could not find container \"60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f\": container with ID starting with 60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.989017 4751 scope.go:117] "RemoveContainer" containerID="d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.989460 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e"} err="failed to get container status \"d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e\": rpc error: code = NotFound desc = could not find container \"d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e\": container with ID starting with d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.989487 4751 scope.go:117] "RemoveContainer" 
containerID="f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.989768 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b"} err="failed to get container status \"f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\": rpc error: code = NotFound desc = could not find container \"f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\": container with ID starting with f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.989785 4751 scope.go:117] "RemoveContainer" containerID="410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.989990 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc"} err="failed to get container status \"410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\": rpc error: code = NotFound desc = could not find container \"410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\": container with ID starting with 410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.990005 4751 scope.go:117] "RemoveContainer" containerID="c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.990159 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d"} err="failed to get container status \"c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\": rpc error: code = NotFound desc = could not find container \"c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\": container with ID starting with c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.990173 4751 scope.go:117] "RemoveContainer" containerID="a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.990849 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354"} err="failed to get container status \"a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\": rpc error: code = NotFound desc = could not find container \"a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\": container with ID starting with a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354 not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.990865 4751 scope.go:117] "RemoveContainer" containerID="684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.991056 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028"} err="failed to get container status \"684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\": rpc error: code = NotFound desc = could not find 
container \"684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\": container with ID starting with 684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028 not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.991071 4751 scope.go:117] "RemoveContainer" containerID="787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.991243 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948"} err="failed to get container status \"787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\": rpc error: code = NotFound desc = could not find container \"787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\": container with ID starting with 787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948 not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.991257 4751 scope.go:117] "RemoveContainer" containerID="58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.991441 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082"} err="failed to get container status \"58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\": rpc error: code = NotFound desc = could not find container \"58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\": container with ID starting with 58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082 not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.991453 4751 scope.go:117] "RemoveContainer" containerID="3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.991631 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de"} err="failed to get container status \"3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\": rpc error: code = NotFound desc = could not find container \"3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\": container with ID starting with 3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.991643 4751 scope.go:117] "RemoveContainer" containerID="60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.991812 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f"} err="failed to get container status \"60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f\": rpc error: code = NotFound desc = could not find container \"60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f\": container with ID starting with 60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.991826 4751 scope.go:117] "RemoveContainer" containerID="d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.991984 4751 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e"} err="failed to get container status \"d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e\": rpc error: code = NotFound desc = could not find container \"d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e\": container with ID starting with d39c71aa9419d51308598817a0f77ae020f5763965a1905f76e573b67002232e not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.991998 4751 scope.go:117] "RemoveContainer" containerID="f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.993610 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b"} err="failed to get container status \"f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\": rpc error: code = NotFound desc = could not find container \"f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b\": container with ID starting with f37849b794c5e8f5bfd379e94735a38f6539f20360124eb9c2e8995612e9c99b not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.993665 4751 scope.go:117] "RemoveContainer" containerID="410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.993971 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc"} err="failed to get container status \"410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\": rpc error: code = NotFound desc = could not find container \"410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc\": container with ID starting with 410275ec4afc7a95200361c75f5b9156f920b8343ffaac4626afa68016f4e1dc not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.993992 4751 scope.go:117] "RemoveContainer" containerID="c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.994193 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d"} err="failed to get container status \"c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\": rpc error: code = NotFound desc = could not find container \"c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d\": container with ID starting with c9a01cb57f61499e9b1440ed76732aeb26e7a51059f7fb59c72a2e5cca52ba0d not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.994207 4751 scope.go:117] "RemoveContainer" containerID="a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.994367 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354"} err="failed to get container status \"a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\": rpc error: code = NotFound desc = could not find container \"a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354\": container with ID starting with 
a5acb3f9f99576c8eec4c595660e5c9996db06410ff63e6a8f2902e70654b354 not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.994382 4751 scope.go:117] "RemoveContainer" containerID="684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.994624 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028"} err="failed to get container status \"684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\": rpc error: code = NotFound desc = could not find container \"684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028\": container with ID starting with 684edc31bd8d130b5d4f0ca7775cb40894b200f8df7afcf31f746f4fb446f028 not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.994657 4751 scope.go:117] "RemoveContainer" containerID="787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.994897 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948"} err="failed to get container status \"787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\": rpc error: code = NotFound desc = could not find container \"787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948\": container with ID starting with 787d8f3b52e0c6cd55d3a17891d720ee17513ac16d4775bee07540591dac9948 not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.994923 4751 scope.go:117] "RemoveContainer" containerID="58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.995209 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082"} err="failed to get container status \"58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\": rpc error: code = NotFound desc = could not find container \"58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082\": container with ID starting with 58417651c1c6042c2cbdf7cf496203cbe121d735044a3b9492fde3c8ca010082 not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.995233 4751 scope.go:117] "RemoveContainer" containerID="3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.995456 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de"} err="failed to get container status \"3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\": rpc error: code = NotFound desc = could not find container \"3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de\": container with ID starting with 3410ed05fd195d10b59445f0f5f24557c8bfeb5a1f48f153185190f266def7de not found: ID does not exist" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.995481 4751 scope.go:117] "RemoveContainer" containerID="60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f" Feb 27 16:38:54 crc kubenswrapper[4751]: I0227 16:38:54.995698 4751 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f"} err="failed to get container status \"60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f\": rpc error: code = NotFound desc = could not find container \"60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f\": container with ID starting with 60452d7f6858b266d82c1dbacaf81608cc7beff3d8a9e1e72ad9acd1f672930f not found: ID does not exist" Feb 27 16:38:55 crc kubenswrapper[4751]: I0227 16:38:55.757436 4751 generic.go:334] "Generic (PLEG): container finished" podID="9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6" containerID="18531fe6a13fe59afed1723a6f31733567010ec875c9777c860aac55d2fec1a1" exitCode=0 Feb 27 16:38:55 crc kubenswrapper[4751]: I0227 16:38:55.757596 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" event={"ID":"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6","Type":"ContainerDied","Data":"18531fe6a13fe59afed1723a6f31733567010ec875c9777c860aac55d2fec1a1"} Feb 27 16:38:55 crc kubenswrapper[4751]: I0227 16:38:55.764490 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4jc4n_dc07559e-a5c7-458c-b3ec-646981b798c1/kube-multus/2.log" Feb 27 16:38:55 crc kubenswrapper[4751]: I0227 16:38:55.766678 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4jc4n_dc07559e-a5c7-458c-b3ec-646981b798c1/kube-multus/1.log" Feb 27 16:38:55 crc kubenswrapper[4751]: I0227 16:38:55.766743 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4jc4n" event={"ID":"dc07559e-a5c7-458c-b3ec-646981b798c1","Type":"ContainerStarted","Data":"38af92e9002769bb6ca8d3c6cf31f19ee6f594ec6ac06a5207579be79290124b"} Feb 27 16:38:56 crc kubenswrapper[4751]: I0227 16:38:56.531032 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45a3f89b-11cb-4336-962d-c6835c5f758e" path="/var/lib/kubelet/pods/45a3f89b-11cb-4336-962d-c6835c5f758e/volumes" Feb 27 16:38:56 crc kubenswrapper[4751]: I0227 16:38:56.781935 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" event={"ID":"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6","Type":"ContainerStarted","Data":"925fbcead76dd4c1b36323c277cde869bfcd7c3b0e67af37c8508e09da4c179d"} Feb 27 16:38:56 crc kubenswrapper[4751]: I0227 16:38:56.781995 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" event={"ID":"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6","Type":"ContainerStarted","Data":"e56b1baa075d713681f26352f2aa2003e9058120608fcc7de644819acc1965ab"} Feb 27 16:38:56 crc kubenswrapper[4751]: I0227 16:38:56.782018 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" event={"ID":"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6","Type":"ContainerStarted","Data":"0a262cf086939c37814d28022c16e1ee092e9edf8569c2fc397396525b61b67e"} Feb 27 16:38:56 crc kubenswrapper[4751]: I0227 16:38:56.782064 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" event={"ID":"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6","Type":"ContainerStarted","Data":"354ad1b27ea860d26758d4b4bee7f907d9c5dd2c2a3349fb3b837cbf18371a4d"} Feb 27 16:38:56 crc kubenswrapper[4751]: I0227 16:38:56.782082 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" 
event={"ID":"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6","Type":"ContainerStarted","Data":"70e9334befc1d637fe243d4aa380cf0da96684cd0ff0d93b268530981a69cc31"} Feb 27 16:38:56 crc kubenswrapper[4751]: I0227 16:38:56.782097 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" event={"ID":"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6","Type":"ContainerStarted","Data":"d0abb15f919ef5be660cf79fa620186449e8a6d57969d097f9edafb5e115a74b"} Feb 27 16:38:58 crc kubenswrapper[4751]: I0227 16:38:58.918204 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 16:38:58 crc kubenswrapper[4751]: I0227 16:38:58.919247 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:38:59 crc kubenswrapper[4751]: I0227 16:38:59.831306 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" event={"ID":"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6","Type":"ContainerStarted","Data":"368a4f3db2b52a89c63e34ec72251d5171b15d94954fc2dc2e9164d8aaaf3af3"} Feb 27 16:39:02 crc kubenswrapper[4751]: I0227 16:39:02.622209 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-8vpb7"] Feb 27 16:39:02 crc kubenswrapper[4751]: I0227 16:39:02.624037 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-8vpb7" Feb 27 16:39:02 crc kubenswrapper[4751]: I0227 16:39:02.626242 4751 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-54vq6" Feb 27 16:39:02 crc kubenswrapper[4751]: I0227 16:39:02.626625 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Feb 27 16:39:02 crc kubenswrapper[4751]: I0227 16:39:02.626812 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Feb 27 16:39:02 crc kubenswrapper[4751]: I0227 16:39:02.628090 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Feb 27 16:39:02 crc kubenswrapper[4751]: I0227 16:39:02.714585 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljqc2\" (UniqueName: \"kubernetes.io/projected/54ea9e35-c94b-40f3-b454-55dca7de0349-kube-api-access-ljqc2\") pod \"crc-storage-crc-8vpb7\" (UID: \"54ea9e35-c94b-40f3-b454-55dca7de0349\") " pod="crc-storage/crc-storage-crc-8vpb7" Feb 27 16:39:02 crc kubenswrapper[4751]: I0227 16:39:02.714669 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/54ea9e35-c94b-40f3-b454-55dca7de0349-crc-storage\") pod \"crc-storage-crc-8vpb7\" (UID: \"54ea9e35-c94b-40f3-b454-55dca7de0349\") " pod="crc-storage/crc-storage-crc-8vpb7" Feb 27 16:39:02 crc kubenswrapper[4751]: I0227 16:39:02.714711 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/54ea9e35-c94b-40f3-b454-55dca7de0349-node-mnt\") pod \"crc-storage-crc-8vpb7\" (UID: \"54ea9e35-c94b-40f3-b454-55dca7de0349\") " pod="crc-storage/crc-storage-crc-8vpb7" Feb 27 16:39:02 crc kubenswrapper[4751]: I0227 16:39:02.815884 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/54ea9e35-c94b-40f3-b454-55dca7de0349-node-mnt\") pod \"crc-storage-crc-8vpb7\" (UID: \"54ea9e35-c94b-40f3-b454-55dca7de0349\") " pod="crc-storage/crc-storage-crc-8vpb7" Feb 27 16:39:02 crc kubenswrapper[4751]: I0227 16:39:02.816315 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljqc2\" (UniqueName: \"kubernetes.io/projected/54ea9e35-c94b-40f3-b454-55dca7de0349-kube-api-access-ljqc2\") pod \"crc-storage-crc-8vpb7\" (UID: \"54ea9e35-c94b-40f3-b454-55dca7de0349\") " pod="crc-storage/crc-storage-crc-8vpb7" Feb 27 16:39:02 crc kubenswrapper[4751]: I0227 16:39:02.816365 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/54ea9e35-c94b-40f3-b454-55dca7de0349-crc-storage\") pod \"crc-storage-crc-8vpb7\" (UID: \"54ea9e35-c94b-40f3-b454-55dca7de0349\") " pod="crc-storage/crc-storage-crc-8vpb7" Feb 27 16:39:02 crc kubenswrapper[4751]: I0227 16:39:02.816386 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/54ea9e35-c94b-40f3-b454-55dca7de0349-node-mnt\") pod \"crc-storage-crc-8vpb7\" (UID: \"54ea9e35-c94b-40f3-b454-55dca7de0349\") " pod="crc-storage/crc-storage-crc-8vpb7" Feb 27 16:39:02 crc kubenswrapper[4751]: I0227 16:39:02.817084 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"crc-storage\" (UniqueName: \"kubernetes.io/configmap/54ea9e35-c94b-40f3-b454-55dca7de0349-crc-storage\") pod \"crc-storage-crc-8vpb7\" (UID: \"54ea9e35-c94b-40f3-b454-55dca7de0349\") " pod="crc-storage/crc-storage-crc-8vpb7" Feb 27 16:39:02 crc kubenswrapper[4751]: I0227 16:39:02.850612 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljqc2\" (UniqueName: \"kubernetes.io/projected/54ea9e35-c94b-40f3-b454-55dca7de0349-kube-api-access-ljqc2\") pod \"crc-storage-crc-8vpb7\" (UID: \"54ea9e35-c94b-40f3-b454-55dca7de0349\") " pod="crc-storage/crc-storage-crc-8vpb7" Feb 27 16:39:02 crc kubenswrapper[4751]: I0227 16:39:02.858682 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" event={"ID":"9c59ce0c-f4b8-4b17-8d3f-f1d91675bee6","Type":"ContainerStarted","Data":"0fb5804ab959fd3c19e5c2218942a77a62ea98f630f97ec7ba3139e5cb493182"} Feb 27 16:39:02 crc kubenswrapper[4751]: I0227 16:39:02.859189 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:39:02 crc kubenswrapper[4751]: I0227 16:39:02.859246 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:39:02 crc kubenswrapper[4751]: I0227 16:39:02.888442 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" podStartSLOduration=8.88842286 podStartE2EDuration="8.88842286s" podCreationTimestamp="2026-02-27 16:38:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:39:02.888268316 +0000 UTC m=+905.035282763" watchObservedRunningTime="2026-02-27 16:39:02.88842286 +0000 UTC m=+905.035437327" Feb 27 16:39:02 crc kubenswrapper[4751]: I0227 16:39:02.903928 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:39:02 crc kubenswrapper[4751]: I0227 16:39:02.955077 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-8vpb7" Feb 27 16:39:02 crc kubenswrapper[4751]: E0227 16:39:02.982032 4751 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-8vpb7_crc-storage_54ea9e35-c94b-40f3-b454-55dca7de0349_0(235362464bfc953f8542ef4eebd0f9183b8b4b19589f0c9a18691aea67779497): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 27 16:39:02 crc kubenswrapper[4751]: E0227 16:39:02.982111 4751 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-8vpb7_crc-storage_54ea9e35-c94b-40f3-b454-55dca7de0349_0(235362464bfc953f8542ef4eebd0f9183b8b4b19589f0c9a18691aea67779497): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-8vpb7" Feb 27 16:39:02 crc kubenswrapper[4751]: E0227 16:39:02.982140 4751 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-8vpb7_crc-storage_54ea9e35-c94b-40f3-b454-55dca7de0349_0(235362464bfc953f8542ef4eebd0f9183b8b4b19589f0c9a18691aea67779497): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="crc-storage/crc-storage-crc-8vpb7" Feb 27 16:39:02 crc kubenswrapper[4751]: E0227 16:39:02.982193 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-8vpb7_crc-storage(54ea9e35-c94b-40f3-b454-55dca7de0349)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-8vpb7_crc-storage(54ea9e35-c94b-40f3-b454-55dca7de0349)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-8vpb7_crc-storage_54ea9e35-c94b-40f3-b454-55dca7de0349_0(235362464bfc953f8542ef4eebd0f9183b8b4b19589f0c9a18691aea67779497): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-8vpb7" podUID="54ea9e35-c94b-40f3-b454-55dca7de0349" Feb 27 16:39:03 crc kubenswrapper[4751]: I0227 16:39:03.276291 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-8vpb7"] Feb 27 16:39:03 crc kubenswrapper[4751]: I0227 16:39:03.865483 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-8vpb7" Feb 27 16:39:03 crc kubenswrapper[4751]: I0227 16:39:03.866997 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:39:03 crc kubenswrapper[4751]: I0227 16:39:03.867447 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-8vpb7" Feb 27 16:39:03 crc kubenswrapper[4751]: I0227 16:39:03.912176 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:39:03 crc kubenswrapper[4751]: E0227 16:39:03.930170 4751 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-8vpb7_crc-storage_54ea9e35-c94b-40f3-b454-55dca7de0349_0(c5dc36ea69a030b9da02fab5cef897a83453d1406d5061d950dd8bf454a1069a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 27 16:39:03 crc kubenswrapper[4751]: E0227 16:39:03.930241 4751 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-8vpb7_crc-storage_54ea9e35-c94b-40f3-b454-55dca7de0349_0(c5dc36ea69a030b9da02fab5cef897a83453d1406d5061d950dd8bf454a1069a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-8vpb7" Feb 27 16:39:03 crc kubenswrapper[4751]: E0227 16:39:03.930263 4751 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-8vpb7_crc-storage_54ea9e35-c94b-40f3-b454-55dca7de0349_0(c5dc36ea69a030b9da02fab5cef897a83453d1406d5061d950dd8bf454a1069a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="crc-storage/crc-storage-crc-8vpb7" Feb 27 16:39:03 crc kubenswrapper[4751]: E0227 16:39:03.930312 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-8vpb7_crc-storage(54ea9e35-c94b-40f3-b454-55dca7de0349)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-8vpb7_crc-storage(54ea9e35-c94b-40f3-b454-55dca7de0349)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-8vpb7_crc-storage_54ea9e35-c94b-40f3-b454-55dca7de0349_0(c5dc36ea69a030b9da02fab5cef897a83453d1406d5061d950dd8bf454a1069a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-8vpb7" podUID="54ea9e35-c94b-40f3-b454-55dca7de0349" Feb 27 16:39:18 crc kubenswrapper[4751]: I0227 16:39:18.520515 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-8vpb7" Feb 27 16:39:18 crc kubenswrapper[4751]: I0227 16:39:18.523539 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-8vpb7" Feb 27 16:39:18 crc kubenswrapper[4751]: I0227 16:39:18.858659 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-8vpb7"] Feb 27 16:39:18 crc kubenswrapper[4751]: I0227 16:39:18.865020 4751 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 27 16:39:18 crc kubenswrapper[4751]: I0227 16:39:18.982631 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-8vpb7" event={"ID":"54ea9e35-c94b-40f3-b454-55dca7de0349","Type":"ContainerStarted","Data":"2e8f607d11d56710a39f905ffbbef1c2921ecb0803abce7ca5aa4f464a3e9c31"} Feb 27 16:39:21 crc kubenswrapper[4751]: I0227 16:39:21.002274 4751 generic.go:334] "Generic (PLEG): container finished" podID="54ea9e35-c94b-40f3-b454-55dca7de0349" containerID="70b4dd0b4546b7bb0db5b503b6ae71ee465d5d71f3169e370364edde9173b814" exitCode=0 Feb 27 16:39:21 crc kubenswrapper[4751]: I0227 16:39:21.002392 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-8vpb7" event={"ID":"54ea9e35-c94b-40f3-b454-55dca7de0349","Type":"ContainerDied","Data":"70b4dd0b4546b7bb0db5b503b6ae71ee465d5d71f3169e370364edde9173b814"} Feb 27 16:39:22 crc kubenswrapper[4751]: I0227 16:39:22.315629 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-8vpb7" Feb 27 16:39:22 crc kubenswrapper[4751]: I0227 16:39:22.332656 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/54ea9e35-c94b-40f3-b454-55dca7de0349-node-mnt\") pod \"54ea9e35-c94b-40f3-b454-55dca7de0349\" (UID: \"54ea9e35-c94b-40f3-b454-55dca7de0349\") " Feb 27 16:39:22 crc kubenswrapper[4751]: I0227 16:39:22.332816 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ljqc2\" (UniqueName: \"kubernetes.io/projected/54ea9e35-c94b-40f3-b454-55dca7de0349-kube-api-access-ljqc2\") pod \"54ea9e35-c94b-40f3-b454-55dca7de0349\" (UID: \"54ea9e35-c94b-40f3-b454-55dca7de0349\") " Feb 27 16:39:22 crc kubenswrapper[4751]: I0227 16:39:22.332866 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/54ea9e35-c94b-40f3-b454-55dca7de0349-crc-storage\") pod \"54ea9e35-c94b-40f3-b454-55dca7de0349\" (UID: \"54ea9e35-c94b-40f3-b454-55dca7de0349\") " Feb 27 16:39:22 crc kubenswrapper[4751]: I0227 16:39:22.333972 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/54ea9e35-c94b-40f3-b454-55dca7de0349-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "54ea9e35-c94b-40f3-b454-55dca7de0349" (UID: "54ea9e35-c94b-40f3-b454-55dca7de0349"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:39:22 crc kubenswrapper[4751]: I0227 16:39:22.339631 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54ea9e35-c94b-40f3-b454-55dca7de0349-kube-api-access-ljqc2" (OuterVolumeSpecName: "kube-api-access-ljqc2") pod "54ea9e35-c94b-40f3-b454-55dca7de0349" (UID: "54ea9e35-c94b-40f3-b454-55dca7de0349"). InnerVolumeSpecName "kube-api-access-ljqc2". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:39:22 crc kubenswrapper[4751]: I0227 16:39:22.348914 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54ea9e35-c94b-40f3-b454-55dca7de0349-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "54ea9e35-c94b-40f3-b454-55dca7de0349" (UID: "54ea9e35-c94b-40f3-b454-55dca7de0349"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:39:22 crc kubenswrapper[4751]: I0227 16:39:22.433973 4751 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/54ea9e35-c94b-40f3-b454-55dca7de0349-node-mnt\") on node \"crc\" DevicePath \"\"" Feb 27 16:39:22 crc kubenswrapper[4751]: I0227 16:39:22.434005 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ljqc2\" (UniqueName: \"kubernetes.io/projected/54ea9e35-c94b-40f3-b454-55dca7de0349-kube-api-access-ljqc2\") on node \"crc\" DevicePath \"\"" Feb 27 16:39:22 crc kubenswrapper[4751]: I0227 16:39:22.434016 4751 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/54ea9e35-c94b-40f3-b454-55dca7de0349-crc-storage\") on node \"crc\" DevicePath \"\"" Feb 27 16:39:23 crc kubenswrapper[4751]: I0227 16:39:23.021062 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-8vpb7" event={"ID":"54ea9e35-c94b-40f3-b454-55dca7de0349","Type":"ContainerDied","Data":"2e8f607d11d56710a39f905ffbbef1c2921ecb0803abce7ca5aa4f464a3e9c31"} Feb 27 16:39:23 crc kubenswrapper[4751]: I0227 16:39:23.021123 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e8f607d11d56710a39f905ffbbef1c2921ecb0803abce7ca5aa4f464a3e9c31" Feb 27 16:39:23 crc kubenswrapper[4751]: I0227 16:39:23.021138 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-8vpb7" Feb 27 16:39:23 crc kubenswrapper[4751]: I0227 16:39:23.441566 4751 scope.go:117] "RemoveContainer" containerID="1b78fbbbec971c56f84f987d09108ddcb21d1189a379396e1174678f4de8d0e6" Feb 27 16:39:24 crc kubenswrapper[4751]: I0227 16:39:24.031775 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4jc4n_dc07559e-a5c7-458c-b3ec-646981b798c1/kube-multus/2.log" Feb 27 16:39:24 crc kubenswrapper[4751]: I0227 16:39:24.578369 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-jn79x" Feb 27 16:39:28 crc kubenswrapper[4751]: I0227 16:39:28.917866 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 16:39:28 crc kubenswrapper[4751]: I0227 16:39:28.917941 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:39:28 crc kubenswrapper[4751]: I0227 16:39:28.917988 4751 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 16:39:28 crc kubenswrapper[4751]: I0227 16:39:28.918875 4751 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6eb163b225e8b4061c0a49276f7f1481358603b35f8794f8c9ade9058836265d"} pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 27 16:39:28 crc 
kubenswrapper[4751]: I0227 16:39:28.918947 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" containerID="cri-o://6eb163b225e8b4061c0a49276f7f1481358603b35f8794f8c9ade9058836265d" gracePeriod=600 Feb 27 16:39:29 crc kubenswrapper[4751]: I0227 16:39:29.077486 4751 generic.go:334] "Generic (PLEG): container finished" podID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerID="6eb163b225e8b4061c0a49276f7f1481358603b35f8794f8c9ade9058836265d" exitCode=0 Feb 27 16:39:29 crc kubenswrapper[4751]: I0227 16:39:29.077571 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerDied","Data":"6eb163b225e8b4061c0a49276f7f1481358603b35f8794f8c9ade9058836265d"} Feb 27 16:39:29 crc kubenswrapper[4751]: I0227 16:39:29.077656 4751 scope.go:117] "RemoveContainer" containerID="ffc9c7cd2c6fc8ac46b1b7d570ff4efe965dd019a548ff518b61e9ff634572af" Feb 27 16:39:30 crc kubenswrapper[4751]: I0227 16:39:30.088519 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerStarted","Data":"9018816dbd90d84dbf45956d038f614eb1f6863111903b50bc2958c2e12ef97b"} Feb 27 16:39:30 crc kubenswrapper[4751]: I0227 16:39:30.194013 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7"] Feb 27 16:39:30 crc kubenswrapper[4751]: E0227 16:39:30.197920 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54ea9e35-c94b-40f3-b454-55dca7de0349" containerName="storage" Feb 27 16:39:30 crc kubenswrapper[4751]: I0227 16:39:30.197959 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="54ea9e35-c94b-40f3-b454-55dca7de0349" containerName="storage" Feb 27 16:39:30 crc kubenswrapper[4751]: I0227 16:39:30.198062 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="54ea9e35-c94b-40f3-b454-55dca7de0349" containerName="storage" Feb 27 16:39:30 crc kubenswrapper[4751]: I0227 16:39:30.198817 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7"] Feb 27 16:39:30 crc kubenswrapper[4751]: I0227 16:39:30.198908 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7" Feb 27 16:39:30 crc kubenswrapper[4751]: I0227 16:39:30.201037 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Feb 27 16:39:30 crc kubenswrapper[4751]: I0227 16:39:30.249075 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b5355a8f-edd7-4c96-94f6-0f2904459f73-util\") pod \"0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7\" (UID: \"b5355a8f-edd7-4c96-94f6-0f2904459f73\") " pod="openshift-marketplace/0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7" Feb 27 16:39:30 crc kubenswrapper[4751]: I0227 16:39:30.249116 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b5355a8f-edd7-4c96-94f6-0f2904459f73-bundle\") pod \"0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7\" (UID: \"b5355a8f-edd7-4c96-94f6-0f2904459f73\") " pod="openshift-marketplace/0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7" Feb 27 16:39:30 crc kubenswrapper[4751]: I0227 16:39:30.249170 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58nz2\" (UniqueName: \"kubernetes.io/projected/b5355a8f-edd7-4c96-94f6-0f2904459f73-kube-api-access-58nz2\") pod \"0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7\" (UID: \"b5355a8f-edd7-4c96-94f6-0f2904459f73\") " pod="openshift-marketplace/0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7" Feb 27 16:39:30 crc kubenswrapper[4751]: I0227 16:39:30.350866 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b5355a8f-edd7-4c96-94f6-0f2904459f73-bundle\") pod \"0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7\" (UID: \"b5355a8f-edd7-4c96-94f6-0f2904459f73\") " pod="openshift-marketplace/0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7" Feb 27 16:39:30 crc kubenswrapper[4751]: I0227 16:39:30.351040 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58nz2\" (UniqueName: \"kubernetes.io/projected/b5355a8f-edd7-4c96-94f6-0f2904459f73-kube-api-access-58nz2\") pod \"0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7\" (UID: \"b5355a8f-edd7-4c96-94f6-0f2904459f73\") " pod="openshift-marketplace/0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7" Feb 27 16:39:30 crc kubenswrapper[4751]: I0227 16:39:30.351123 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b5355a8f-edd7-4c96-94f6-0f2904459f73-util\") pod \"0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7\" (UID: \"b5355a8f-edd7-4c96-94f6-0f2904459f73\") " pod="openshift-marketplace/0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7" Feb 27 16:39:30 crc kubenswrapper[4751]: I0227 16:39:30.351437 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b5355a8f-edd7-4c96-94f6-0f2904459f73-bundle\") pod \"0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7\" (UID: \"b5355a8f-edd7-4c96-94f6-0f2904459f73\") " 
pod="openshift-marketplace/0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7" Feb 27 16:39:30 crc kubenswrapper[4751]: I0227 16:39:30.351766 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b5355a8f-edd7-4c96-94f6-0f2904459f73-util\") pod \"0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7\" (UID: \"b5355a8f-edd7-4c96-94f6-0f2904459f73\") " pod="openshift-marketplace/0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7" Feb 27 16:39:30 crc kubenswrapper[4751]: I0227 16:39:30.368514 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58nz2\" (UniqueName: \"kubernetes.io/projected/b5355a8f-edd7-4c96-94f6-0f2904459f73-kube-api-access-58nz2\") pod \"0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7\" (UID: \"b5355a8f-edd7-4c96-94f6-0f2904459f73\") " pod="openshift-marketplace/0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7" Feb 27 16:39:30 crc kubenswrapper[4751]: I0227 16:39:30.512087 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7" Feb 27 16:39:30 crc kubenswrapper[4751]: I0227 16:39:30.776586 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7"] Feb 27 16:39:30 crc kubenswrapper[4751]: W0227 16:39:30.784808 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb5355a8f_edd7_4c96_94f6_0f2904459f73.slice/crio-1a6ff5514e84c4305ac01d5316117dae763a4bebbc6a236b9ce76d6b9ce9b9a5 WatchSource:0}: Error finding container 1a6ff5514e84c4305ac01d5316117dae763a4bebbc6a236b9ce76d6b9ce9b9a5: Status 404 returned error can't find the container with id 1a6ff5514e84c4305ac01d5316117dae763a4bebbc6a236b9ce76d6b9ce9b9a5 Feb 27 16:39:31 crc kubenswrapper[4751]: I0227 16:39:31.095144 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7" event={"ID":"b5355a8f-edd7-4c96-94f6-0f2904459f73","Type":"ContainerStarted","Data":"8da7b0150bc4be2673b1d07766d057d547fb2fbab3df037b1c3b76e838d92df7"} Feb 27 16:39:31 crc kubenswrapper[4751]: I0227 16:39:31.095195 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7" event={"ID":"b5355a8f-edd7-4c96-94f6-0f2904459f73","Type":"ContainerStarted","Data":"1a6ff5514e84c4305ac01d5316117dae763a4bebbc6a236b9ce76d6b9ce9b9a5"} Feb 27 16:39:32 crc kubenswrapper[4751]: I0227 16:39:32.105069 4751 generic.go:334] "Generic (PLEG): container finished" podID="b5355a8f-edd7-4c96-94f6-0f2904459f73" containerID="8da7b0150bc4be2673b1d07766d057d547fb2fbab3df037b1c3b76e838d92df7" exitCode=0 Feb 27 16:39:32 crc kubenswrapper[4751]: I0227 16:39:32.105141 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7" event={"ID":"b5355a8f-edd7-4c96-94f6-0f2904459f73","Type":"ContainerDied","Data":"8da7b0150bc4be2673b1d07766d057d547fb2fbab3df037b1c3b76e838d92df7"} Feb 27 16:39:32 crc kubenswrapper[4751]: I0227 16:39:32.345047 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vwj4g"] Feb 27 16:39:32 crc kubenswrapper[4751]: I0227 
16:39:32.348590 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vwj4g" Feb 27 16:39:32 crc kubenswrapper[4751]: I0227 16:39:32.370434 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vwj4g"] Feb 27 16:39:32 crc kubenswrapper[4751]: I0227 16:39:32.480502 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fcec1b8c-916f-43f2-9272-fde29b2ef0aa-utilities\") pod \"redhat-operators-vwj4g\" (UID: \"fcec1b8c-916f-43f2-9272-fde29b2ef0aa\") " pod="openshift-marketplace/redhat-operators-vwj4g" Feb 27 16:39:32 crc kubenswrapper[4751]: I0227 16:39:32.480571 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fcec1b8c-916f-43f2-9272-fde29b2ef0aa-catalog-content\") pod \"redhat-operators-vwj4g\" (UID: \"fcec1b8c-916f-43f2-9272-fde29b2ef0aa\") " pod="openshift-marketplace/redhat-operators-vwj4g" Feb 27 16:39:32 crc kubenswrapper[4751]: I0227 16:39:32.480651 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgxj5\" (UniqueName: \"kubernetes.io/projected/fcec1b8c-916f-43f2-9272-fde29b2ef0aa-kube-api-access-tgxj5\") pod \"redhat-operators-vwj4g\" (UID: \"fcec1b8c-916f-43f2-9272-fde29b2ef0aa\") " pod="openshift-marketplace/redhat-operators-vwj4g" Feb 27 16:39:32 crc kubenswrapper[4751]: I0227 16:39:32.582547 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fcec1b8c-916f-43f2-9272-fde29b2ef0aa-catalog-content\") pod \"redhat-operators-vwj4g\" (UID: \"fcec1b8c-916f-43f2-9272-fde29b2ef0aa\") " pod="openshift-marketplace/redhat-operators-vwj4g" Feb 27 16:39:32 crc kubenswrapper[4751]: I0227 16:39:32.582677 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tgxj5\" (UniqueName: \"kubernetes.io/projected/fcec1b8c-916f-43f2-9272-fde29b2ef0aa-kube-api-access-tgxj5\") pod \"redhat-operators-vwj4g\" (UID: \"fcec1b8c-916f-43f2-9272-fde29b2ef0aa\") " pod="openshift-marketplace/redhat-operators-vwj4g" Feb 27 16:39:32 crc kubenswrapper[4751]: I0227 16:39:32.582723 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fcec1b8c-916f-43f2-9272-fde29b2ef0aa-utilities\") pod \"redhat-operators-vwj4g\" (UID: \"fcec1b8c-916f-43f2-9272-fde29b2ef0aa\") " pod="openshift-marketplace/redhat-operators-vwj4g" Feb 27 16:39:32 crc kubenswrapper[4751]: I0227 16:39:32.583175 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fcec1b8c-916f-43f2-9272-fde29b2ef0aa-catalog-content\") pod \"redhat-operators-vwj4g\" (UID: \"fcec1b8c-916f-43f2-9272-fde29b2ef0aa\") " pod="openshift-marketplace/redhat-operators-vwj4g" Feb 27 16:39:32 crc kubenswrapper[4751]: I0227 16:39:32.583292 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fcec1b8c-916f-43f2-9272-fde29b2ef0aa-utilities\") pod \"redhat-operators-vwj4g\" (UID: \"fcec1b8c-916f-43f2-9272-fde29b2ef0aa\") " pod="openshift-marketplace/redhat-operators-vwj4g" Feb 27 16:39:32 crc kubenswrapper[4751]: I0227 16:39:32.610725 4751 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tgxj5\" (UniqueName: \"kubernetes.io/projected/fcec1b8c-916f-43f2-9272-fde29b2ef0aa-kube-api-access-tgxj5\") pod \"redhat-operators-vwj4g\" (UID: \"fcec1b8c-916f-43f2-9272-fde29b2ef0aa\") " pod="openshift-marketplace/redhat-operators-vwj4g" Feb 27 16:39:32 crc kubenswrapper[4751]: I0227 16:39:32.677710 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vwj4g" Feb 27 16:39:32 crc kubenswrapper[4751]: I0227 16:39:32.956001 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vwj4g"] Feb 27 16:39:33 crc kubenswrapper[4751]: I0227 16:39:33.116492 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwj4g" event={"ID":"fcec1b8c-916f-43f2-9272-fde29b2ef0aa","Type":"ContainerStarted","Data":"98fc4f66c381c755af236f142789efa4e2508c9cf2477ae4f53d0755e9ce3e71"} Feb 27 16:39:33 crc kubenswrapper[4751]: I0227 16:39:33.116548 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwj4g" event={"ID":"fcec1b8c-916f-43f2-9272-fde29b2ef0aa","Type":"ContainerStarted","Data":"076415dc274dc0ce2596f206de7a50d531ca72fa15cc2e0758a34caeb0321b5b"} Feb 27 16:39:34 crc kubenswrapper[4751]: I0227 16:39:34.130833 4751 generic.go:334] "Generic (PLEG): container finished" podID="b5355a8f-edd7-4c96-94f6-0f2904459f73" containerID="4fae299a8fa486503abc8556dbbca5c2d3365fbd462bf7754cb489efd73eed3e" exitCode=0 Feb 27 16:39:34 crc kubenswrapper[4751]: I0227 16:39:34.131270 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7" event={"ID":"b5355a8f-edd7-4c96-94f6-0f2904459f73","Type":"ContainerDied","Data":"4fae299a8fa486503abc8556dbbca5c2d3365fbd462bf7754cb489efd73eed3e"} Feb 27 16:39:34 crc kubenswrapper[4751]: I0227 16:39:34.136336 4751 generic.go:334] "Generic (PLEG): container finished" podID="fcec1b8c-916f-43f2-9272-fde29b2ef0aa" containerID="98fc4f66c381c755af236f142789efa4e2508c9cf2477ae4f53d0755e9ce3e71" exitCode=0 Feb 27 16:39:34 crc kubenswrapper[4751]: I0227 16:39:34.136431 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwj4g" event={"ID":"fcec1b8c-916f-43f2-9272-fde29b2ef0aa","Type":"ContainerDied","Data":"98fc4f66c381c755af236f142789efa4e2508c9cf2477ae4f53d0755e9ce3e71"} Feb 27 16:39:35 crc kubenswrapper[4751]: I0227 16:39:35.145636 4751 generic.go:334] "Generic (PLEG): container finished" podID="b5355a8f-edd7-4c96-94f6-0f2904459f73" containerID="acab8f8fe5e9185a62e4cdd1edc1844a47c36fbef935495e73b22da1dc273f93" exitCode=0 Feb 27 16:39:35 crc kubenswrapper[4751]: I0227 16:39:35.145726 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7" event={"ID":"b5355a8f-edd7-4c96-94f6-0f2904459f73","Type":"ContainerDied","Data":"acab8f8fe5e9185a62e4cdd1edc1844a47c36fbef935495e73b22da1dc273f93"} Feb 27 16:39:35 crc kubenswrapper[4751]: I0227 16:39:35.149093 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwj4g" event={"ID":"fcec1b8c-916f-43f2-9272-fde29b2ef0aa","Type":"ContainerStarted","Data":"033e0ed493622a8f9f798b8dd21061f85f33ffb46c40f11e0b03a58577bc29ec"} Feb 27 16:39:36 crc kubenswrapper[4751]: I0227 16:39:36.159480 4751 generic.go:334] "Generic (PLEG): 
container finished" podID="fcec1b8c-916f-43f2-9272-fde29b2ef0aa" containerID="033e0ed493622a8f9f798b8dd21061f85f33ffb46c40f11e0b03a58577bc29ec" exitCode=0 Feb 27 16:39:36 crc kubenswrapper[4751]: I0227 16:39:36.159661 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwj4g" event={"ID":"fcec1b8c-916f-43f2-9272-fde29b2ef0aa","Type":"ContainerDied","Data":"033e0ed493622a8f9f798b8dd21061f85f33ffb46c40f11e0b03a58577bc29ec"} Feb 27 16:39:36 crc kubenswrapper[4751]: I0227 16:39:36.479778 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7" Feb 27 16:39:36 crc kubenswrapper[4751]: I0227 16:39:36.639633 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b5355a8f-edd7-4c96-94f6-0f2904459f73-bundle\") pod \"b5355a8f-edd7-4c96-94f6-0f2904459f73\" (UID: \"b5355a8f-edd7-4c96-94f6-0f2904459f73\") " Feb 27 16:39:36 crc kubenswrapper[4751]: I0227 16:39:36.639756 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b5355a8f-edd7-4c96-94f6-0f2904459f73-util\") pod \"b5355a8f-edd7-4c96-94f6-0f2904459f73\" (UID: \"b5355a8f-edd7-4c96-94f6-0f2904459f73\") " Feb 27 16:39:36 crc kubenswrapper[4751]: I0227 16:39:36.639797 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58nz2\" (UniqueName: \"kubernetes.io/projected/b5355a8f-edd7-4c96-94f6-0f2904459f73-kube-api-access-58nz2\") pod \"b5355a8f-edd7-4c96-94f6-0f2904459f73\" (UID: \"b5355a8f-edd7-4c96-94f6-0f2904459f73\") " Feb 27 16:39:36 crc kubenswrapper[4751]: I0227 16:39:36.641050 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5355a8f-edd7-4c96-94f6-0f2904459f73-bundle" (OuterVolumeSpecName: "bundle") pod "b5355a8f-edd7-4c96-94f6-0f2904459f73" (UID: "b5355a8f-edd7-4c96-94f6-0f2904459f73"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:39:36 crc kubenswrapper[4751]: I0227 16:39:36.644990 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5355a8f-edd7-4c96-94f6-0f2904459f73-kube-api-access-58nz2" (OuterVolumeSpecName: "kube-api-access-58nz2") pod "b5355a8f-edd7-4c96-94f6-0f2904459f73" (UID: "b5355a8f-edd7-4c96-94f6-0f2904459f73"). InnerVolumeSpecName "kube-api-access-58nz2". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:39:36 crc kubenswrapper[4751]: I0227 16:39:36.741555 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-58nz2\" (UniqueName: \"kubernetes.io/projected/b5355a8f-edd7-4c96-94f6-0f2904459f73-kube-api-access-58nz2\") on node \"crc\" DevicePath \"\"" Feb 27 16:39:36 crc kubenswrapper[4751]: I0227 16:39:36.741582 4751 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b5355a8f-edd7-4c96-94f6-0f2904459f73-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:39:36 crc kubenswrapper[4751]: I0227 16:39:36.891523 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5355a8f-edd7-4c96-94f6-0f2904459f73-util" (OuterVolumeSpecName: "util") pod "b5355a8f-edd7-4c96-94f6-0f2904459f73" (UID: "b5355a8f-edd7-4c96-94f6-0f2904459f73"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:39:36 crc kubenswrapper[4751]: I0227 16:39:36.944368 4751 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b5355a8f-edd7-4c96-94f6-0f2904459f73-util\") on node \"crc\" DevicePath \"\"" Feb 27 16:39:37 crc kubenswrapper[4751]: I0227 16:39:37.179561 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7" event={"ID":"b5355a8f-edd7-4c96-94f6-0f2904459f73","Type":"ContainerDied","Data":"1a6ff5514e84c4305ac01d5316117dae763a4bebbc6a236b9ce76d6b9ce9b9a5"} Feb 27 16:39:37 crc kubenswrapper[4751]: I0227 16:39:37.179637 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1a6ff5514e84c4305ac01d5316117dae763a4bebbc6a236b9ce76d6b9ce9b9a5" Feb 27 16:39:37 crc kubenswrapper[4751]: I0227 16:39:37.179597 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7" Feb 27 16:39:37 crc kubenswrapper[4751]: I0227 16:39:37.182907 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwj4g" event={"ID":"fcec1b8c-916f-43f2-9272-fde29b2ef0aa","Type":"ContainerStarted","Data":"be6df3f173b38eb3b2d17f328031b8d22abca7c87b42fb6a95ce8279e9c522f3"} Feb 27 16:39:37 crc kubenswrapper[4751]: I0227 16:39:37.226184 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vwj4g" podStartSLOduration=2.422077738 podStartE2EDuration="5.226151395s" podCreationTimestamp="2026-02-27 16:39:32 +0000 UTC" firstStartedPulling="2026-02-27 16:39:34.139854714 +0000 UTC m=+936.286869201" lastFinishedPulling="2026-02-27 16:39:36.943928411 +0000 UTC m=+939.090942858" observedRunningTime="2026-02-27 16:39:37.219089518 +0000 UTC m=+939.366104005" watchObservedRunningTime="2026-02-27 16:39:37.226151395 +0000 UTC m=+939.373165882" Feb 27 16:39:40 crc kubenswrapper[4751]: I0227 16:39:40.681209 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-75c5dccd6c-fxswb"] Feb 27 16:39:40 crc kubenswrapper[4751]: E0227 16:39:40.681722 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5355a8f-edd7-4c96-94f6-0f2904459f73" containerName="pull" Feb 27 16:39:40 crc kubenswrapper[4751]: I0227 16:39:40.681734 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5355a8f-edd7-4c96-94f6-0f2904459f73" containerName="pull" Feb 27 16:39:40 crc kubenswrapper[4751]: E0227 16:39:40.681747 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5355a8f-edd7-4c96-94f6-0f2904459f73" containerName="util" Feb 27 16:39:40 crc kubenswrapper[4751]: I0227 16:39:40.681753 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5355a8f-edd7-4c96-94f6-0f2904459f73" containerName="util" Feb 27 16:39:40 crc kubenswrapper[4751]: E0227 16:39:40.681782 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5355a8f-edd7-4c96-94f6-0f2904459f73" containerName="extract" Feb 27 16:39:40 crc kubenswrapper[4751]: I0227 16:39:40.681789 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5355a8f-edd7-4c96-94f6-0f2904459f73" containerName="extract" Feb 27 16:39:40 crc kubenswrapper[4751]: I0227 16:39:40.681887 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5355a8f-edd7-4c96-94f6-0f2904459f73" 
containerName="extract" Feb 27 16:39:40 crc kubenswrapper[4751]: I0227 16:39:40.682292 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-75c5dccd6c-fxswb" Feb 27 16:39:40 crc kubenswrapper[4751]: I0227 16:39:40.684125 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-75c5dccd6c-fxswb"] Feb 27 16:39:40 crc kubenswrapper[4751]: I0227 16:39:40.685278 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-lckqc" Feb 27 16:39:40 crc kubenswrapper[4751]: I0227 16:39:40.685458 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Feb 27 16:39:40 crc kubenswrapper[4751]: I0227 16:39:40.685901 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Feb 27 16:39:40 crc kubenswrapper[4751]: I0227 16:39:40.798455 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rddzz\" (UniqueName: \"kubernetes.io/projected/cd7678bf-01a7-443e-9a9f-0c1297607112-kube-api-access-rddzz\") pod \"nmstate-operator-75c5dccd6c-fxswb\" (UID: \"cd7678bf-01a7-443e-9a9f-0c1297607112\") " pod="openshift-nmstate/nmstate-operator-75c5dccd6c-fxswb" Feb 27 16:39:40 crc kubenswrapper[4751]: I0227 16:39:40.900181 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rddzz\" (UniqueName: \"kubernetes.io/projected/cd7678bf-01a7-443e-9a9f-0c1297607112-kube-api-access-rddzz\") pod \"nmstate-operator-75c5dccd6c-fxswb\" (UID: \"cd7678bf-01a7-443e-9a9f-0c1297607112\") " pod="openshift-nmstate/nmstate-operator-75c5dccd6c-fxswb" Feb 27 16:39:40 crc kubenswrapper[4751]: I0227 16:39:40.923213 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rddzz\" (UniqueName: \"kubernetes.io/projected/cd7678bf-01a7-443e-9a9f-0c1297607112-kube-api-access-rddzz\") pod \"nmstate-operator-75c5dccd6c-fxswb\" (UID: \"cd7678bf-01a7-443e-9a9f-0c1297607112\") " pod="openshift-nmstate/nmstate-operator-75c5dccd6c-fxswb" Feb 27 16:39:40 crc kubenswrapper[4751]: I0227 16:39:40.994996 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-75c5dccd6c-fxswb" Feb 27 16:39:41 crc kubenswrapper[4751]: I0227 16:39:41.439783 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-75c5dccd6c-fxswb"] Feb 27 16:39:41 crc kubenswrapper[4751]: W0227 16:39:41.445374 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcd7678bf_01a7_443e_9a9f_0c1297607112.slice/crio-ec0c0155a147e11719240f35f4b2e433cb9c7beb92fdcaf2ef86bf745bcf7480 WatchSource:0}: Error finding container ec0c0155a147e11719240f35f4b2e433cb9c7beb92fdcaf2ef86bf745bcf7480: Status 404 returned error can't find the container with id ec0c0155a147e11719240f35f4b2e433cb9c7beb92fdcaf2ef86bf745bcf7480 Feb 27 16:39:42 crc kubenswrapper[4751]: I0227 16:39:42.211413 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-75c5dccd6c-fxswb" event={"ID":"cd7678bf-01a7-443e-9a9f-0c1297607112","Type":"ContainerStarted","Data":"ec0c0155a147e11719240f35f4b2e433cb9c7beb92fdcaf2ef86bf745bcf7480"} Feb 27 16:39:42 crc kubenswrapper[4751]: I0227 16:39:42.678502 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vwj4g" Feb 27 16:39:42 crc kubenswrapper[4751]: I0227 16:39:42.678574 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vwj4g" Feb 27 16:39:43 crc kubenswrapper[4751]: I0227 16:39:43.735031 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vwj4g" podUID="fcec1b8c-916f-43f2-9272-fde29b2ef0aa" containerName="registry-server" probeResult="failure" output=< Feb 27 16:39:43 crc kubenswrapper[4751]: timeout: failed to connect service ":50051" within 1s Feb 27 16:39:43 crc kubenswrapper[4751]: > Feb 27 16:39:45 crc kubenswrapper[4751]: I0227 16:39:45.234001 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-75c5dccd6c-fxswb" event={"ID":"cd7678bf-01a7-443e-9a9f-0c1297607112","Type":"ContainerStarted","Data":"93c191d597d0e527848db15093b42b70c3ce35e1c67b2f865568011e89a13e83"} Feb 27 16:39:45 crc kubenswrapper[4751]: I0227 16:39:45.254828 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-75c5dccd6c-fxswb" podStartSLOduration=2.107690569 podStartE2EDuration="5.254804503s" podCreationTimestamp="2026-02-27 16:39:40 +0000 UTC" firstStartedPulling="2026-02-27 16:39:41.448364783 +0000 UTC m=+943.595379250" lastFinishedPulling="2026-02-27 16:39:44.595478737 +0000 UTC m=+946.742493184" observedRunningTime="2026-02-27 16:39:45.251502405 +0000 UTC m=+947.398516892" watchObservedRunningTime="2026-02-27 16:39:45.254804503 +0000 UTC m=+947.401818980" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.328387 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-69594cc75-qtlsh"] Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.329855 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-69594cc75-qtlsh" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.332999 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-bqjp5" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.347261 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-69594cc75-qtlsh"] Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.355230 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-6mj44"] Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.356234 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-6mj44" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.363938 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-786f45cff4-mvl65"] Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.366868 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-786f45cff4-mvl65" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.368501 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.381301 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-786f45cff4-mvl65"] Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.430989 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6s9hz\" (UniqueName: \"kubernetes.io/projected/b80ace26-6333-4a22-8c2c-74c9d023f1b7-kube-api-access-6s9hz\") pod \"nmstate-metrics-69594cc75-qtlsh\" (UID: \"b80ace26-6333-4a22-8c2c-74c9d023f1b7\") " pod="openshift-nmstate/nmstate-metrics-69594cc75-qtlsh" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.474776 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5dcbbd79cf-nzj7f"] Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.475601 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5dcbbd79cf-nzj7f" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.480873 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.481171 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.481217 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-rlk7r" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.483200 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5dcbbd79cf-nzj7f"] Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.532170 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/aac8927a-2c6c-40e7-9357-9899dd63927b-dbus-socket\") pod \"nmstate-handler-6mj44\" (UID: \"aac8927a-2c6c-40e7-9357-9899dd63927b\") " pod="openshift-nmstate/nmstate-handler-6mj44" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.532209 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/aac8927a-2c6c-40e7-9357-9899dd63927b-nmstate-lock\") pod \"nmstate-handler-6mj44\" (UID: \"aac8927a-2c6c-40e7-9357-9899dd63927b\") " pod="openshift-nmstate/nmstate-handler-6mj44" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.532242 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p4h5v\" (UniqueName: \"kubernetes.io/projected/740f3d53-7dcd-4f19-8c7e-35ea882c433f-kube-api-access-p4h5v\") pod \"nmstate-webhook-786f45cff4-mvl65\" (UID: \"740f3d53-7dcd-4f19-8c7e-35ea882c433f\") " pod="openshift-nmstate/nmstate-webhook-786f45cff4-mvl65" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.532473 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6s9hz\" (UniqueName: \"kubernetes.io/projected/b80ace26-6333-4a22-8c2c-74c9d023f1b7-kube-api-access-6s9hz\") pod \"nmstate-metrics-69594cc75-qtlsh\" (UID: \"b80ace26-6333-4a22-8c2c-74c9d023f1b7\") " pod="openshift-nmstate/nmstate-metrics-69594cc75-qtlsh" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.532517 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/aac8927a-2c6c-40e7-9357-9899dd63927b-ovs-socket\") pod \"nmstate-handler-6mj44\" (UID: \"aac8927a-2c6c-40e7-9357-9899dd63927b\") " pod="openshift-nmstate/nmstate-handler-6mj44" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.532537 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlwcb\" (UniqueName: \"kubernetes.io/projected/aac8927a-2c6c-40e7-9357-9899dd63927b-kube-api-access-hlwcb\") pod \"nmstate-handler-6mj44\" (UID: \"aac8927a-2c6c-40e7-9357-9899dd63927b\") " pod="openshift-nmstate/nmstate-handler-6mj44" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.532564 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/740f3d53-7dcd-4f19-8c7e-35ea882c433f-tls-key-pair\") pod \"nmstate-webhook-786f45cff4-mvl65\" 
(UID: \"740f3d53-7dcd-4f19-8c7e-35ea882c433f\") " pod="openshift-nmstate/nmstate-webhook-786f45cff4-mvl65" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.560072 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6s9hz\" (UniqueName: \"kubernetes.io/projected/b80ace26-6333-4a22-8c2c-74c9d023f1b7-kube-api-access-6s9hz\") pod \"nmstate-metrics-69594cc75-qtlsh\" (UID: \"b80ace26-6333-4a22-8c2c-74c9d023f1b7\") " pod="openshift-nmstate/nmstate-metrics-69594cc75-qtlsh" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.634097 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/740f3d53-7dcd-4f19-8c7e-35ea882c433f-tls-key-pair\") pod \"nmstate-webhook-786f45cff4-mvl65\" (UID: \"740f3d53-7dcd-4f19-8c7e-35ea882c433f\") " pod="openshift-nmstate/nmstate-webhook-786f45cff4-mvl65" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.634164 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/aac8927a-2c6c-40e7-9357-9899dd63927b-dbus-socket\") pod \"nmstate-handler-6mj44\" (UID: \"aac8927a-2c6c-40e7-9357-9899dd63927b\") " pod="openshift-nmstate/nmstate-handler-6mj44" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.634187 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/aac8927a-2c6c-40e7-9357-9899dd63927b-nmstate-lock\") pod \"nmstate-handler-6mj44\" (UID: \"aac8927a-2c6c-40e7-9357-9899dd63927b\") " pod="openshift-nmstate/nmstate-handler-6mj44" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.634205 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p4h5v\" (UniqueName: \"kubernetes.io/projected/740f3d53-7dcd-4f19-8c7e-35ea882c433f-kube-api-access-p4h5v\") pod \"nmstate-webhook-786f45cff4-mvl65\" (UID: \"740f3d53-7dcd-4f19-8c7e-35ea882c433f\") " pod="openshift-nmstate/nmstate-webhook-786f45cff4-mvl65" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.634253 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zml9\" (UniqueName: \"kubernetes.io/projected/fd2468a9-6de3-40c7-b91a-b2f47b9737b7-kube-api-access-5zml9\") pod \"nmstate-console-plugin-5dcbbd79cf-nzj7f\" (UID: \"fd2468a9-6de3-40c7-b91a-b2f47b9737b7\") " pod="openshift-nmstate/nmstate-console-plugin-5dcbbd79cf-nzj7f" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.634339 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/fd2468a9-6de3-40c7-b91a-b2f47b9737b7-nginx-conf\") pod \"nmstate-console-plugin-5dcbbd79cf-nzj7f\" (UID: \"fd2468a9-6de3-40c7-b91a-b2f47b9737b7\") " pod="openshift-nmstate/nmstate-console-plugin-5dcbbd79cf-nzj7f" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.634366 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/fd2468a9-6de3-40c7-b91a-b2f47b9737b7-plugin-serving-cert\") pod \"nmstate-console-plugin-5dcbbd79cf-nzj7f\" (UID: \"fd2468a9-6de3-40c7-b91a-b2f47b9737b7\") " pod="openshift-nmstate/nmstate-console-plugin-5dcbbd79cf-nzj7f" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.634423 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/aac8927a-2c6c-40e7-9357-9899dd63927b-ovs-socket\") pod \"nmstate-handler-6mj44\" (UID: \"aac8927a-2c6c-40e7-9357-9899dd63927b\") " pod="openshift-nmstate/nmstate-handler-6mj44" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.634440 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlwcb\" (UniqueName: \"kubernetes.io/projected/aac8927a-2c6c-40e7-9357-9899dd63927b-kube-api-access-hlwcb\") pod \"nmstate-handler-6mj44\" (UID: \"aac8927a-2c6c-40e7-9357-9899dd63927b\") " pod="openshift-nmstate/nmstate-handler-6mj44" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.634560 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/aac8927a-2c6c-40e7-9357-9899dd63927b-dbus-socket\") pod \"nmstate-handler-6mj44\" (UID: \"aac8927a-2c6c-40e7-9357-9899dd63927b\") " pod="openshift-nmstate/nmstate-handler-6mj44" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.635257 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/aac8927a-2c6c-40e7-9357-9899dd63927b-ovs-socket\") pod \"nmstate-handler-6mj44\" (UID: \"aac8927a-2c6c-40e7-9357-9899dd63927b\") " pod="openshift-nmstate/nmstate-handler-6mj44" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.635301 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/aac8927a-2c6c-40e7-9357-9899dd63927b-nmstate-lock\") pod \"nmstate-handler-6mj44\" (UID: \"aac8927a-2c6c-40e7-9357-9899dd63927b\") " pod="openshift-nmstate/nmstate-handler-6mj44" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.649782 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-69594cc75-qtlsh" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.650061 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/740f3d53-7dcd-4f19-8c7e-35ea882c433f-tls-key-pair\") pod \"nmstate-webhook-786f45cff4-mvl65\" (UID: \"740f3d53-7dcd-4f19-8c7e-35ea882c433f\") " pod="openshift-nmstate/nmstate-webhook-786f45cff4-mvl65" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.673409 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-6fc8c869c4-6xmzz"] Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.674145 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.678152 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p4h5v\" (UniqueName: \"kubernetes.io/projected/740f3d53-7dcd-4f19-8c7e-35ea882c433f-kube-api-access-p4h5v\") pod \"nmstate-webhook-786f45cff4-mvl65\" (UID: \"740f3d53-7dcd-4f19-8c7e-35ea882c433f\") " pod="openshift-nmstate/nmstate-webhook-786f45cff4-mvl65" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.691710 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-786f45cff4-mvl65" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.712968 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlwcb\" (UniqueName: \"kubernetes.io/projected/aac8927a-2c6c-40e7-9357-9899dd63927b-kube-api-access-hlwcb\") pod \"nmstate-handler-6mj44\" (UID: \"aac8927a-2c6c-40e7-9357-9899dd63927b\") " pod="openshift-nmstate/nmstate-handler-6mj44" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.717325 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6fc8c869c4-6xmzz"] Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.735581 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zml9\" (UniqueName: \"kubernetes.io/projected/fd2468a9-6de3-40c7-b91a-b2f47b9737b7-kube-api-access-5zml9\") pod \"nmstate-console-plugin-5dcbbd79cf-nzj7f\" (UID: \"fd2468a9-6de3-40c7-b91a-b2f47b9737b7\") " pod="openshift-nmstate/nmstate-console-plugin-5dcbbd79cf-nzj7f" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.735650 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/fd2468a9-6de3-40c7-b91a-b2f47b9737b7-nginx-conf\") pod \"nmstate-console-plugin-5dcbbd79cf-nzj7f\" (UID: \"fd2468a9-6de3-40c7-b91a-b2f47b9737b7\") " pod="openshift-nmstate/nmstate-console-plugin-5dcbbd79cf-nzj7f" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.735677 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/fd2468a9-6de3-40c7-b91a-b2f47b9737b7-plugin-serving-cert\") pod \"nmstate-console-plugin-5dcbbd79cf-nzj7f\" (UID: \"fd2468a9-6de3-40c7-b91a-b2f47b9737b7\") " pod="openshift-nmstate/nmstate-console-plugin-5dcbbd79cf-nzj7f" Feb 27 16:39:50 crc kubenswrapper[4751]: E0227 16:39:50.735842 4751 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Feb 27 16:39:50 crc kubenswrapper[4751]: E0227 16:39:50.735892 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fd2468a9-6de3-40c7-b91a-b2f47b9737b7-plugin-serving-cert podName:fd2468a9-6de3-40c7-b91a-b2f47b9737b7 nodeName:}" failed. No retries permitted until 2026-02-27 16:39:51.235876176 +0000 UTC m=+953.382890623 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/fd2468a9-6de3-40c7-b91a-b2f47b9737b7-plugin-serving-cert") pod "nmstate-console-plugin-5dcbbd79cf-nzj7f" (UID: "fd2468a9-6de3-40c7-b91a-b2f47b9737b7") : secret "plugin-serving-cert" not found Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.736966 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/fd2468a9-6de3-40c7-b91a-b2f47b9737b7-nginx-conf\") pod \"nmstate-console-plugin-5dcbbd79cf-nzj7f\" (UID: \"fd2468a9-6de3-40c7-b91a-b2f47b9737b7\") " pod="openshift-nmstate/nmstate-console-plugin-5dcbbd79cf-nzj7f" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.767839 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5zml9\" (UniqueName: \"kubernetes.io/projected/fd2468a9-6de3-40c7-b91a-b2f47b9737b7-kube-api-access-5zml9\") pod \"nmstate-console-plugin-5dcbbd79cf-nzj7f\" (UID: \"fd2468a9-6de3-40c7-b91a-b2f47b9737b7\") " pod="openshift-nmstate/nmstate-console-plugin-5dcbbd79cf-nzj7f" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.837229 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/6aebea7e-98ef-43bf-a765-62da9e107aa3-oauth-serving-cert\") pod \"console-6fc8c869c4-6xmzz\" (UID: \"6aebea7e-98ef-43bf-a765-62da9e107aa3\") " pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.837275 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-849sp\" (UniqueName: \"kubernetes.io/projected/6aebea7e-98ef-43bf-a765-62da9e107aa3-kube-api-access-849sp\") pod \"console-6fc8c869c4-6xmzz\" (UID: \"6aebea7e-98ef-43bf-a765-62da9e107aa3\") " pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.837313 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/6aebea7e-98ef-43bf-a765-62da9e107aa3-console-config\") pod \"console-6fc8c869c4-6xmzz\" (UID: \"6aebea7e-98ef-43bf-a765-62da9e107aa3\") " pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.837364 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6aebea7e-98ef-43bf-a765-62da9e107aa3-service-ca\") pod \"console-6fc8c869c4-6xmzz\" (UID: \"6aebea7e-98ef-43bf-a765-62da9e107aa3\") " pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.837379 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6aebea7e-98ef-43bf-a765-62da9e107aa3-trusted-ca-bundle\") pod \"console-6fc8c869c4-6xmzz\" (UID: \"6aebea7e-98ef-43bf-a765-62da9e107aa3\") " pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.837414 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/6aebea7e-98ef-43bf-a765-62da9e107aa3-console-oauth-config\") pod \"console-6fc8c869c4-6xmzz\" (UID: \"6aebea7e-98ef-43bf-a765-62da9e107aa3\") " 
pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.837515 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/6aebea7e-98ef-43bf-a765-62da9e107aa3-console-serving-cert\") pod \"console-6fc8c869c4-6xmzz\" (UID: \"6aebea7e-98ef-43bf-a765-62da9e107aa3\") " pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.940072 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6aebea7e-98ef-43bf-a765-62da9e107aa3-service-ca\") pod \"console-6fc8c869c4-6xmzz\" (UID: \"6aebea7e-98ef-43bf-a765-62da9e107aa3\") " pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.940117 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6aebea7e-98ef-43bf-a765-62da9e107aa3-trusted-ca-bundle\") pod \"console-6fc8c869c4-6xmzz\" (UID: \"6aebea7e-98ef-43bf-a765-62da9e107aa3\") " pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.940135 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/6aebea7e-98ef-43bf-a765-62da9e107aa3-console-oauth-config\") pod \"console-6fc8c869c4-6xmzz\" (UID: \"6aebea7e-98ef-43bf-a765-62da9e107aa3\") " pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.940172 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/6aebea7e-98ef-43bf-a765-62da9e107aa3-console-serving-cert\") pod \"console-6fc8c869c4-6xmzz\" (UID: \"6aebea7e-98ef-43bf-a765-62da9e107aa3\") " pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.940210 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/6aebea7e-98ef-43bf-a765-62da9e107aa3-oauth-serving-cert\") pod \"console-6fc8c869c4-6xmzz\" (UID: \"6aebea7e-98ef-43bf-a765-62da9e107aa3\") " pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.940228 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-849sp\" (UniqueName: \"kubernetes.io/projected/6aebea7e-98ef-43bf-a765-62da9e107aa3-kube-api-access-849sp\") pod \"console-6fc8c869c4-6xmzz\" (UID: \"6aebea7e-98ef-43bf-a765-62da9e107aa3\") " pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.940280 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/6aebea7e-98ef-43bf-a765-62da9e107aa3-console-config\") pod \"console-6fc8c869c4-6xmzz\" (UID: \"6aebea7e-98ef-43bf-a765-62da9e107aa3\") " pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.942771 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/6aebea7e-98ef-43bf-a765-62da9e107aa3-console-config\") pod \"console-6fc8c869c4-6xmzz\" (UID: 
\"6aebea7e-98ef-43bf-a765-62da9e107aa3\") " pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.947383 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-786f45cff4-mvl65"] Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.947395 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/6aebea7e-98ef-43bf-a765-62da9e107aa3-console-serving-cert\") pod \"console-6fc8c869c4-6xmzz\" (UID: \"6aebea7e-98ef-43bf-a765-62da9e107aa3\") " pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.948254 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/6aebea7e-98ef-43bf-a765-62da9e107aa3-oauth-serving-cert\") pod \"console-6fc8c869c4-6xmzz\" (UID: \"6aebea7e-98ef-43bf-a765-62da9e107aa3\") " pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.948290 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6aebea7e-98ef-43bf-a765-62da9e107aa3-service-ca\") pod \"console-6fc8c869c4-6xmzz\" (UID: \"6aebea7e-98ef-43bf-a765-62da9e107aa3\") " pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.949262 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/6aebea7e-98ef-43bf-a765-62da9e107aa3-console-oauth-config\") pod \"console-6fc8c869c4-6xmzz\" (UID: \"6aebea7e-98ef-43bf-a765-62da9e107aa3\") " pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.951744 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6aebea7e-98ef-43bf-a765-62da9e107aa3-trusted-ca-bundle\") pod \"console-6fc8c869c4-6xmzz\" (UID: \"6aebea7e-98ef-43bf-a765-62da9e107aa3\") " pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:39:50 crc kubenswrapper[4751]: W0227 16:39:50.960783 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod740f3d53_7dcd_4f19_8c7e_35ea882c433f.slice/crio-6e9d241a77c102a1dba708485505b703993a3686d94a3f3bf3e0f28a05554227 WatchSource:0}: Error finding container 6e9d241a77c102a1dba708485505b703993a3686d94a3f3bf3e0f28a05554227: Status 404 returned error can't find the container with id 6e9d241a77c102a1dba708485505b703993a3686d94a3f3bf3e0f28a05554227 Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.970126 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-849sp\" (UniqueName: \"kubernetes.io/projected/6aebea7e-98ef-43bf-a765-62da9e107aa3-kube-api-access-849sp\") pod \"console-6fc8c869c4-6xmzz\" (UID: \"6aebea7e-98ef-43bf-a765-62da9e107aa3\") " pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.972814 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-6mj44" Feb 27 16:39:50 crc kubenswrapper[4751]: I0227 16:39:50.972824 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-69594cc75-qtlsh"] Feb 27 16:39:51 crc kubenswrapper[4751]: W0227 16:39:51.011606 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaac8927a_2c6c_40e7_9357_9899dd63927b.slice/crio-95d048d1095a4eab0cebea505947d4a5021d983c6298c250e5818ad672584929 WatchSource:0}: Error finding container 95d048d1095a4eab0cebea505947d4a5021d983c6298c250e5818ad672584929: Status 404 returned error can't find the container with id 95d048d1095a4eab0cebea505947d4a5021d983c6298c250e5818ad672584929 Feb 27 16:39:51 crc kubenswrapper[4751]: I0227 16:39:51.087837 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:39:51 crc kubenswrapper[4751]: I0227 16:39:51.244458 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/fd2468a9-6de3-40c7-b91a-b2f47b9737b7-plugin-serving-cert\") pod \"nmstate-console-plugin-5dcbbd79cf-nzj7f\" (UID: \"fd2468a9-6de3-40c7-b91a-b2f47b9737b7\") " pod="openshift-nmstate/nmstate-console-plugin-5dcbbd79cf-nzj7f" Feb 27 16:39:51 crc kubenswrapper[4751]: I0227 16:39:51.248259 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/fd2468a9-6de3-40c7-b91a-b2f47b9737b7-plugin-serving-cert\") pod \"nmstate-console-plugin-5dcbbd79cf-nzj7f\" (UID: \"fd2468a9-6de3-40c7-b91a-b2f47b9737b7\") " pod="openshift-nmstate/nmstate-console-plugin-5dcbbd79cf-nzj7f" Feb 27 16:39:51 crc kubenswrapper[4751]: I0227 16:39:51.277086 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-6mj44" event={"ID":"aac8927a-2c6c-40e7-9357-9899dd63927b","Type":"ContainerStarted","Data":"95d048d1095a4eab0cebea505947d4a5021d983c6298c250e5818ad672584929"} Feb 27 16:39:51 crc kubenswrapper[4751]: I0227 16:39:51.278167 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-69594cc75-qtlsh" event={"ID":"b80ace26-6333-4a22-8c2c-74c9d023f1b7","Type":"ContainerStarted","Data":"9a503e4c5c7a546bdd9adcc463ca799cc2c552502262f13120864c6176598856"} Feb 27 16:39:51 crc kubenswrapper[4751]: I0227 16:39:51.281315 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-786f45cff4-mvl65" event={"ID":"740f3d53-7dcd-4f19-8c7e-35ea882c433f","Type":"ContainerStarted","Data":"6e9d241a77c102a1dba708485505b703993a3686d94a3f3bf3e0f28a05554227"} Feb 27 16:39:51 crc kubenswrapper[4751]: I0227 16:39:51.311560 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6fc8c869c4-6xmzz"] Feb 27 16:39:51 crc kubenswrapper[4751]: W0227 16:39:51.329590 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6aebea7e_98ef_43bf_a765_62da9e107aa3.slice/crio-cab2291f67efb7002889317391491482a1a4edd4200e41ffb84647fefcd63303 WatchSource:0}: Error finding container cab2291f67efb7002889317391491482a1a4edd4200e41ffb84647fefcd63303: Status 404 returned error can't find the container with id cab2291f67efb7002889317391491482a1a4edd4200e41ffb84647fefcd63303 Feb 27 16:39:51 crc kubenswrapper[4751]: I0227 
16:39:51.395389 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5dcbbd79cf-nzj7f" Feb 27 16:39:51 crc kubenswrapper[4751]: I0227 16:39:51.646334 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5dcbbd79cf-nzj7f"] Feb 27 16:39:51 crc kubenswrapper[4751]: W0227 16:39:51.646724 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfd2468a9_6de3_40c7_b91a_b2f47b9737b7.slice/crio-4b08bf94aa20e5f4defc69de485df3baf4a1e8e105b0c3747253b4f9f212a5c2 WatchSource:0}: Error finding container 4b08bf94aa20e5f4defc69de485df3baf4a1e8e105b0c3747253b4f9f212a5c2: Status 404 returned error can't find the container with id 4b08bf94aa20e5f4defc69de485df3baf4a1e8e105b0c3747253b4f9f212a5c2 Feb 27 16:39:52 crc kubenswrapper[4751]: I0227 16:39:52.290836 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6fc8c869c4-6xmzz" event={"ID":"6aebea7e-98ef-43bf-a765-62da9e107aa3","Type":"ContainerStarted","Data":"d03b8c691522e523a5226be6872d4176fa79d2b807f2e805ff5ff063e60b0a27"} Feb 27 16:39:52 crc kubenswrapper[4751]: I0227 16:39:52.290893 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6fc8c869c4-6xmzz" event={"ID":"6aebea7e-98ef-43bf-a765-62da9e107aa3","Type":"ContainerStarted","Data":"cab2291f67efb7002889317391491482a1a4edd4200e41ffb84647fefcd63303"} Feb 27 16:39:52 crc kubenswrapper[4751]: I0227 16:39:52.292837 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5dcbbd79cf-nzj7f" event={"ID":"fd2468a9-6de3-40c7-b91a-b2f47b9737b7","Type":"ContainerStarted","Data":"4b08bf94aa20e5f4defc69de485df3baf4a1e8e105b0c3747253b4f9f212a5c2"} Feb 27 16:39:52 crc kubenswrapper[4751]: I0227 16:39:52.321858 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-6fc8c869c4-6xmzz" podStartSLOduration=2.321831287 podStartE2EDuration="2.321831287s" podCreationTimestamp="2026-02-27 16:39:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:39:52.314648567 +0000 UTC m=+954.461663074" watchObservedRunningTime="2026-02-27 16:39:52.321831287 +0000 UTC m=+954.468845774" Feb 27 16:39:52 crc kubenswrapper[4751]: I0227 16:39:52.736107 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vwj4g" Feb 27 16:39:52 crc kubenswrapper[4751]: I0227 16:39:52.775675 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vwj4g" Feb 27 16:39:52 crc kubenswrapper[4751]: I0227 16:39:52.983699 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vwj4g"] Feb 27 16:39:54 crc kubenswrapper[4751]: I0227 16:39:54.306514 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vwj4g" podUID="fcec1b8c-916f-43f2-9272-fde29b2ef0aa" containerName="registry-server" containerID="cri-o://be6df3f173b38eb3b2d17f328031b8d22abca7c87b42fb6a95ce8279e9c522f3" gracePeriod=2 Feb 27 16:39:54 crc kubenswrapper[4751]: I0227 16:39:54.708260 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vwj4g" Feb 27 16:39:54 crc kubenswrapper[4751]: I0227 16:39:54.795672 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fcec1b8c-916f-43f2-9272-fde29b2ef0aa-utilities\") pod \"fcec1b8c-916f-43f2-9272-fde29b2ef0aa\" (UID: \"fcec1b8c-916f-43f2-9272-fde29b2ef0aa\") " Feb 27 16:39:54 crc kubenswrapper[4751]: I0227 16:39:54.796715 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fcec1b8c-916f-43f2-9272-fde29b2ef0aa-utilities" (OuterVolumeSpecName: "utilities") pod "fcec1b8c-916f-43f2-9272-fde29b2ef0aa" (UID: "fcec1b8c-916f-43f2-9272-fde29b2ef0aa"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:39:54 crc kubenswrapper[4751]: I0227 16:39:54.796773 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tgxj5\" (UniqueName: \"kubernetes.io/projected/fcec1b8c-916f-43f2-9272-fde29b2ef0aa-kube-api-access-tgxj5\") pod \"fcec1b8c-916f-43f2-9272-fde29b2ef0aa\" (UID: \"fcec1b8c-916f-43f2-9272-fde29b2ef0aa\") " Feb 27 16:39:54 crc kubenswrapper[4751]: I0227 16:39:54.796801 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fcec1b8c-916f-43f2-9272-fde29b2ef0aa-catalog-content\") pod \"fcec1b8c-916f-43f2-9272-fde29b2ef0aa\" (UID: \"fcec1b8c-916f-43f2-9272-fde29b2ef0aa\") " Feb 27 16:39:54 crc kubenswrapper[4751]: I0227 16:39:54.797051 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fcec1b8c-916f-43f2-9272-fde29b2ef0aa-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 16:39:54 crc kubenswrapper[4751]: I0227 16:39:54.806043 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fcec1b8c-916f-43f2-9272-fde29b2ef0aa-kube-api-access-tgxj5" (OuterVolumeSpecName: "kube-api-access-tgxj5") pod "fcec1b8c-916f-43f2-9272-fde29b2ef0aa" (UID: "fcec1b8c-916f-43f2-9272-fde29b2ef0aa"). InnerVolumeSpecName "kube-api-access-tgxj5". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:39:54 crc kubenswrapper[4751]: I0227 16:39:54.898071 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tgxj5\" (UniqueName: \"kubernetes.io/projected/fcec1b8c-916f-43f2-9272-fde29b2ef0aa-kube-api-access-tgxj5\") on node \"crc\" DevicePath \"\"" Feb 27 16:39:54 crc kubenswrapper[4751]: I0227 16:39:54.923066 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fcec1b8c-916f-43f2-9272-fde29b2ef0aa-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fcec1b8c-916f-43f2-9272-fde29b2ef0aa" (UID: "fcec1b8c-916f-43f2-9272-fde29b2ef0aa"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:39:54 crc kubenswrapper[4751]: I0227 16:39:54.999845 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fcec1b8c-916f-43f2-9272-fde29b2ef0aa-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 16:39:55 crc kubenswrapper[4751]: I0227 16:39:55.314448 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-786f45cff4-mvl65" event={"ID":"740f3d53-7dcd-4f19-8c7e-35ea882c433f","Type":"ContainerStarted","Data":"f00b8898eb4ae0964bfe07abda26fc479265a2d966bc49500e17f78b3f9179b7"} Feb 27 16:39:55 crc kubenswrapper[4751]: I0227 16:39:55.314787 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-786f45cff4-mvl65" Feb 27 16:39:55 crc kubenswrapper[4751]: I0227 16:39:55.322913 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-6mj44" event={"ID":"aac8927a-2c6c-40e7-9357-9899dd63927b","Type":"ContainerStarted","Data":"be60ab62274b8da00b0d6cc30c96127d7154cbcc83b59d3fc0448d8248251ead"} Feb 27 16:39:55 crc kubenswrapper[4751]: I0227 16:39:55.323120 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-6mj44" Feb 27 16:39:55 crc kubenswrapper[4751]: I0227 16:39:55.329097 4751 generic.go:334] "Generic (PLEG): container finished" podID="fcec1b8c-916f-43f2-9272-fde29b2ef0aa" containerID="be6df3f173b38eb3b2d17f328031b8d22abca7c87b42fb6a95ce8279e9c522f3" exitCode=0 Feb 27 16:39:55 crc kubenswrapper[4751]: I0227 16:39:55.329177 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwj4g" event={"ID":"fcec1b8c-916f-43f2-9272-fde29b2ef0aa","Type":"ContainerDied","Data":"be6df3f173b38eb3b2d17f328031b8d22abca7c87b42fb6a95ce8279e9c522f3"} Feb 27 16:39:55 crc kubenswrapper[4751]: I0227 16:39:55.329204 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwj4g" event={"ID":"fcec1b8c-916f-43f2-9272-fde29b2ef0aa","Type":"ContainerDied","Data":"076415dc274dc0ce2596f206de7a50d531ca72fa15cc2e0758a34caeb0321b5b"} Feb 27 16:39:55 crc kubenswrapper[4751]: I0227 16:39:55.329235 4751 scope.go:117] "RemoveContainer" containerID="be6df3f173b38eb3b2d17f328031b8d22abca7c87b42fb6a95ce8279e9c522f3" Feb 27 16:39:55 crc kubenswrapper[4751]: I0227 16:39:55.329378 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vwj4g" Feb 27 16:39:55 crc kubenswrapper[4751]: I0227 16:39:55.338579 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-786f45cff4-mvl65" podStartSLOduration=1.811371649 podStartE2EDuration="5.338556805s" podCreationTimestamp="2026-02-27 16:39:50 +0000 UTC" firstStartedPulling="2026-02-27 16:39:50.963146713 +0000 UTC m=+953.110161160" lastFinishedPulling="2026-02-27 16:39:54.490331869 +0000 UTC m=+956.637346316" observedRunningTime="2026-02-27 16:39:55.335000621 +0000 UTC m=+957.482015088" watchObservedRunningTime="2026-02-27 16:39:55.338556805 +0000 UTC m=+957.485571252" Feb 27 16:39:55 crc kubenswrapper[4751]: I0227 16:39:55.339331 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-69594cc75-qtlsh" event={"ID":"b80ace26-6333-4a22-8c2c-74c9d023f1b7","Type":"ContainerStarted","Data":"77cd265a39e7636c28c82c2fbb878b057d234fdf7f0c987da785c30a75f9f003"} Feb 27 16:39:55 crc kubenswrapper[4751]: I0227 16:39:55.356355 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-6mj44" podStartSLOduration=1.8687939409999998 podStartE2EDuration="5.356333516s" podCreationTimestamp="2026-02-27 16:39:50 +0000 UTC" firstStartedPulling="2026-02-27 16:39:51.013720894 +0000 UTC m=+953.160735341" lastFinishedPulling="2026-02-27 16:39:54.501260429 +0000 UTC m=+956.648274916" observedRunningTime="2026-02-27 16:39:55.347988215 +0000 UTC m=+957.495002682" watchObservedRunningTime="2026-02-27 16:39:55.356333516 +0000 UTC m=+957.503347963" Feb 27 16:39:55 crc kubenswrapper[4751]: I0227 16:39:55.382246 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vwj4g"] Feb 27 16:39:55 crc kubenswrapper[4751]: I0227 16:39:55.391515 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-vwj4g"] Feb 27 16:39:55 crc kubenswrapper[4751]: I0227 16:39:55.475824 4751 scope.go:117] "RemoveContainer" containerID="033e0ed493622a8f9f798b8dd21061f85f33ffb46c40f11e0b03a58577bc29ec" Feb 27 16:39:55 crc kubenswrapper[4751]: I0227 16:39:55.499370 4751 scope.go:117] "RemoveContainer" containerID="98fc4f66c381c755af236f142789efa4e2508c9cf2477ae4f53d0755e9ce3e71" Feb 27 16:39:55 crc kubenswrapper[4751]: I0227 16:39:55.514673 4751 scope.go:117] "RemoveContainer" containerID="be6df3f173b38eb3b2d17f328031b8d22abca7c87b42fb6a95ce8279e9c522f3" Feb 27 16:39:55 crc kubenswrapper[4751]: E0227 16:39:55.515126 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be6df3f173b38eb3b2d17f328031b8d22abca7c87b42fb6a95ce8279e9c522f3\": container with ID starting with be6df3f173b38eb3b2d17f328031b8d22abca7c87b42fb6a95ce8279e9c522f3 not found: ID does not exist" containerID="be6df3f173b38eb3b2d17f328031b8d22abca7c87b42fb6a95ce8279e9c522f3" Feb 27 16:39:55 crc kubenswrapper[4751]: I0227 16:39:55.515157 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be6df3f173b38eb3b2d17f328031b8d22abca7c87b42fb6a95ce8279e9c522f3"} err="failed to get container status \"be6df3f173b38eb3b2d17f328031b8d22abca7c87b42fb6a95ce8279e9c522f3\": rpc error: code = NotFound desc = could not find container \"be6df3f173b38eb3b2d17f328031b8d22abca7c87b42fb6a95ce8279e9c522f3\": container with ID starting with 
be6df3f173b38eb3b2d17f328031b8d22abca7c87b42fb6a95ce8279e9c522f3 not found: ID does not exist" Feb 27 16:39:55 crc kubenswrapper[4751]: I0227 16:39:55.515179 4751 scope.go:117] "RemoveContainer" containerID="033e0ed493622a8f9f798b8dd21061f85f33ffb46c40f11e0b03a58577bc29ec" Feb 27 16:39:55 crc kubenswrapper[4751]: E0227 16:39:55.515426 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"033e0ed493622a8f9f798b8dd21061f85f33ffb46c40f11e0b03a58577bc29ec\": container with ID starting with 033e0ed493622a8f9f798b8dd21061f85f33ffb46c40f11e0b03a58577bc29ec not found: ID does not exist" containerID="033e0ed493622a8f9f798b8dd21061f85f33ffb46c40f11e0b03a58577bc29ec" Feb 27 16:39:55 crc kubenswrapper[4751]: I0227 16:39:55.515444 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"033e0ed493622a8f9f798b8dd21061f85f33ffb46c40f11e0b03a58577bc29ec"} err="failed to get container status \"033e0ed493622a8f9f798b8dd21061f85f33ffb46c40f11e0b03a58577bc29ec\": rpc error: code = NotFound desc = could not find container \"033e0ed493622a8f9f798b8dd21061f85f33ffb46c40f11e0b03a58577bc29ec\": container with ID starting with 033e0ed493622a8f9f798b8dd21061f85f33ffb46c40f11e0b03a58577bc29ec not found: ID does not exist" Feb 27 16:39:55 crc kubenswrapper[4751]: I0227 16:39:55.515456 4751 scope.go:117] "RemoveContainer" containerID="98fc4f66c381c755af236f142789efa4e2508c9cf2477ae4f53d0755e9ce3e71" Feb 27 16:39:55 crc kubenswrapper[4751]: E0227 16:39:55.515821 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98fc4f66c381c755af236f142789efa4e2508c9cf2477ae4f53d0755e9ce3e71\": container with ID starting with 98fc4f66c381c755af236f142789efa4e2508c9cf2477ae4f53d0755e9ce3e71 not found: ID does not exist" containerID="98fc4f66c381c755af236f142789efa4e2508c9cf2477ae4f53d0755e9ce3e71" Feb 27 16:39:55 crc kubenswrapper[4751]: I0227 16:39:55.515843 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98fc4f66c381c755af236f142789efa4e2508c9cf2477ae4f53d0755e9ce3e71"} err="failed to get container status \"98fc4f66c381c755af236f142789efa4e2508c9cf2477ae4f53d0755e9ce3e71\": rpc error: code = NotFound desc = could not find container \"98fc4f66c381c755af236f142789efa4e2508c9cf2477ae4f53d0755e9ce3e71\": container with ID starting with 98fc4f66c381c755af236f142789efa4e2508c9cf2477ae4f53d0755e9ce3e71 not found: ID does not exist" Feb 27 16:39:56 crc kubenswrapper[4751]: I0227 16:39:56.349487 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5dcbbd79cf-nzj7f" event={"ID":"fd2468a9-6de3-40c7-b91a-b2f47b9737b7","Type":"ContainerStarted","Data":"e6ebffc2ada557051ad453f842e88163e841f5a7c6f290d3564993515b4f4fae"} Feb 27 16:39:56 crc kubenswrapper[4751]: I0227 16:39:56.370329 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5dcbbd79cf-nzj7f" podStartSLOduration=2.458213544 podStartE2EDuration="6.370302908s" podCreationTimestamp="2026-02-27 16:39:50 +0000 UTC" firstStartedPulling="2026-02-27 16:39:51.649536777 +0000 UTC m=+953.796551224" lastFinishedPulling="2026-02-27 16:39:55.561626121 +0000 UTC m=+957.708640588" observedRunningTime="2026-02-27 16:39:56.364976357 +0000 UTC m=+958.511990824" watchObservedRunningTime="2026-02-27 16:39:56.370302908 +0000 UTC m=+958.517317355" Feb 27 
16:39:56 crc kubenswrapper[4751]: I0227 16:39:56.529036 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fcec1b8c-916f-43f2-9272-fde29b2ef0aa" path="/var/lib/kubelet/pods/fcec1b8c-916f-43f2-9272-fde29b2ef0aa/volumes" Feb 27 16:39:57 crc kubenswrapper[4751]: I0227 16:39:57.361995 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-69594cc75-qtlsh" event={"ID":"b80ace26-6333-4a22-8c2c-74c9d023f1b7","Type":"ContainerStarted","Data":"a74d95cf2f109b62ff35ea4e62f66f8a51253816bb9ffcf60d900863d0252019"} Feb 27 16:39:58 crc kubenswrapper[4751]: I0227 16:39:58.391572 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-69594cc75-qtlsh" podStartSLOduration=2.215041265 podStartE2EDuration="8.391546783s" podCreationTimestamp="2026-02-27 16:39:50 +0000 UTC" firstStartedPulling="2026-02-27 16:39:50.999660261 +0000 UTC m=+953.146674708" lastFinishedPulling="2026-02-27 16:39:57.176165779 +0000 UTC m=+959.323180226" observedRunningTime="2026-02-27 16:39:58.386784796 +0000 UTC m=+960.533799253" watchObservedRunningTime="2026-02-27 16:39:58.391546783 +0000 UTC m=+960.538561240" Feb 27 16:40:00 crc kubenswrapper[4751]: I0227 16:40:00.143378 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536840-gt6tq"] Feb 27 16:40:00 crc kubenswrapper[4751]: E0227 16:40:00.143896 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcec1b8c-916f-43f2-9272-fde29b2ef0aa" containerName="extract-utilities" Feb 27 16:40:00 crc kubenswrapper[4751]: I0227 16:40:00.143929 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcec1b8c-916f-43f2-9272-fde29b2ef0aa" containerName="extract-utilities" Feb 27 16:40:00 crc kubenswrapper[4751]: E0227 16:40:00.143951 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcec1b8c-916f-43f2-9272-fde29b2ef0aa" containerName="extract-content" Feb 27 16:40:00 crc kubenswrapper[4751]: I0227 16:40:00.143966 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcec1b8c-916f-43f2-9272-fde29b2ef0aa" containerName="extract-content" Feb 27 16:40:00 crc kubenswrapper[4751]: E0227 16:40:00.144015 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcec1b8c-916f-43f2-9272-fde29b2ef0aa" containerName="registry-server" Feb 27 16:40:00 crc kubenswrapper[4751]: I0227 16:40:00.144030 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcec1b8c-916f-43f2-9272-fde29b2ef0aa" containerName="registry-server" Feb 27 16:40:00 crc kubenswrapper[4751]: I0227 16:40:00.144220 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="fcec1b8c-916f-43f2-9272-fde29b2ef0aa" containerName="registry-server" Feb 27 16:40:00 crc kubenswrapper[4751]: I0227 16:40:00.145185 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536840-gt6tq" Feb 27 16:40:00 crc kubenswrapper[4751]: I0227 16:40:00.150730 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 16:40:00 crc kubenswrapper[4751]: I0227 16:40:00.150754 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 16:40:00 crc kubenswrapper[4751]: I0227 16:40:00.154926 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536840-gt6tq"] Feb 27 16:40:00 crc kubenswrapper[4751]: I0227 16:40:00.158330 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 16:40:00 crc kubenswrapper[4751]: I0227 16:40:00.277918 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkd29\" (UniqueName: \"kubernetes.io/projected/95d5d54c-def0-4b9e-b678-e708dec2ecc5-kube-api-access-kkd29\") pod \"auto-csr-approver-29536840-gt6tq\" (UID: \"95d5d54c-def0-4b9e-b678-e708dec2ecc5\") " pod="openshift-infra/auto-csr-approver-29536840-gt6tq" Feb 27 16:40:00 crc kubenswrapper[4751]: I0227 16:40:00.379603 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkd29\" (UniqueName: \"kubernetes.io/projected/95d5d54c-def0-4b9e-b678-e708dec2ecc5-kube-api-access-kkd29\") pod \"auto-csr-approver-29536840-gt6tq\" (UID: \"95d5d54c-def0-4b9e-b678-e708dec2ecc5\") " pod="openshift-infra/auto-csr-approver-29536840-gt6tq" Feb 27 16:40:00 crc kubenswrapper[4751]: I0227 16:40:00.401196 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kkd29\" (UniqueName: \"kubernetes.io/projected/95d5d54c-def0-4b9e-b678-e708dec2ecc5-kube-api-access-kkd29\") pod \"auto-csr-approver-29536840-gt6tq\" (UID: \"95d5d54c-def0-4b9e-b678-e708dec2ecc5\") " pod="openshift-infra/auto-csr-approver-29536840-gt6tq" Feb 27 16:40:00 crc kubenswrapper[4751]: I0227 16:40:00.468224 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536840-gt6tq" Feb 27 16:40:00 crc kubenswrapper[4751]: I0227 16:40:00.720166 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536840-gt6tq"] Feb 27 16:40:00 crc kubenswrapper[4751]: W0227 16:40:00.730199 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95d5d54c_def0_4b9e_b678_e708dec2ecc5.slice/crio-cd577ebfbafd57e5f4adea7e4a92547970e049ee8301bd28e699887cab73026b WatchSource:0}: Error finding container cd577ebfbafd57e5f4adea7e4a92547970e049ee8301bd28e699887cab73026b: Status 404 returned error can't find the container with id cd577ebfbafd57e5f4adea7e4a92547970e049ee8301bd28e699887cab73026b Feb 27 16:40:01 crc kubenswrapper[4751]: I0227 16:40:01.011945 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-6mj44" Feb 27 16:40:01 crc kubenswrapper[4751]: I0227 16:40:01.088370 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:40:01 crc kubenswrapper[4751]: I0227 16:40:01.088463 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:40:01 crc kubenswrapper[4751]: I0227 16:40:01.097100 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:40:01 crc kubenswrapper[4751]: I0227 16:40:01.395940 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536840-gt6tq" event={"ID":"95d5d54c-def0-4b9e-b678-e708dec2ecc5","Type":"ContainerStarted","Data":"cd577ebfbafd57e5f4adea7e4a92547970e049ee8301bd28e699887cab73026b"} Feb 27 16:40:01 crc kubenswrapper[4751]: I0227 16:40:01.403915 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-6fc8c869c4-6xmzz" Feb 27 16:40:01 crc kubenswrapper[4751]: I0227 16:40:01.487475 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-hb87p"] Feb 27 16:40:02 crc kubenswrapper[4751]: I0227 16:40:02.403347 4751 generic.go:334] "Generic (PLEG): container finished" podID="95d5d54c-def0-4b9e-b678-e708dec2ecc5" containerID="21531c6323ea30503124a33a716bdbd9ac3a3f6b39a03e44004cf805cd19e25c" exitCode=0 Feb 27 16:40:02 crc kubenswrapper[4751]: I0227 16:40:02.403470 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536840-gt6tq" event={"ID":"95d5d54c-def0-4b9e-b678-e708dec2ecc5","Type":"ContainerDied","Data":"21531c6323ea30503124a33a716bdbd9ac3a3f6b39a03e44004cf805cd19e25c"} Feb 27 16:40:03 crc kubenswrapper[4751]: I0227 16:40:03.718556 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536840-gt6tq" Feb 27 16:40:03 crc kubenswrapper[4751]: I0227 16:40:03.833859 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kkd29\" (UniqueName: \"kubernetes.io/projected/95d5d54c-def0-4b9e-b678-e708dec2ecc5-kube-api-access-kkd29\") pod \"95d5d54c-def0-4b9e-b678-e708dec2ecc5\" (UID: \"95d5d54c-def0-4b9e-b678-e708dec2ecc5\") " Feb 27 16:40:03 crc kubenswrapper[4751]: I0227 16:40:03.842714 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95d5d54c-def0-4b9e-b678-e708dec2ecc5-kube-api-access-kkd29" (OuterVolumeSpecName: "kube-api-access-kkd29") pod "95d5d54c-def0-4b9e-b678-e708dec2ecc5" (UID: "95d5d54c-def0-4b9e-b678-e708dec2ecc5"). InnerVolumeSpecName "kube-api-access-kkd29". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:40:03 crc kubenswrapper[4751]: I0227 16:40:03.937853 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kkd29\" (UniqueName: \"kubernetes.io/projected/95d5d54c-def0-4b9e-b678-e708dec2ecc5-kube-api-access-kkd29\") on node \"crc\" DevicePath \"\"" Feb 27 16:40:04 crc kubenswrapper[4751]: I0227 16:40:04.421084 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536840-gt6tq" event={"ID":"95d5d54c-def0-4b9e-b678-e708dec2ecc5","Type":"ContainerDied","Data":"cd577ebfbafd57e5f4adea7e4a92547970e049ee8301bd28e699887cab73026b"} Feb 27 16:40:04 crc kubenswrapper[4751]: I0227 16:40:04.421137 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cd577ebfbafd57e5f4adea7e4a92547970e049ee8301bd28e699887cab73026b" Feb 27 16:40:04 crc kubenswrapper[4751]: I0227 16:40:04.421186 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536840-gt6tq" Feb 27 16:40:04 crc kubenswrapper[4751]: I0227 16:40:04.776777 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536834-s786r"] Feb 27 16:40:04 crc kubenswrapper[4751]: I0227 16:40:04.783887 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536834-s786r"] Feb 27 16:40:06 crc kubenswrapper[4751]: I0227 16:40:06.527831 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c33cd39-6c33-4bd9-80aa-819849282664" path="/var/lib/kubelet/pods/8c33cd39-6c33-4bd9-80aa-819849282664/volumes" Feb 27 16:40:10 crc kubenswrapper[4751]: I0227 16:40:10.702891 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-786f45cff4-mvl65" Feb 27 16:40:23 crc kubenswrapper[4751]: I0227 16:40:23.517266 4751 scope.go:117] "RemoveContainer" containerID="9f78361b0884090d3f79935b157812fcb5c1a4975336aed5d96d022d5f80dac6" Feb 27 16:40:24 crc kubenswrapper[4751]: I0227 16:40:24.026473 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7"] Feb 27 16:40:24 crc kubenswrapper[4751]: E0227 16:40:24.026954 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95d5d54c-def0-4b9e-b678-e708dec2ecc5" containerName="oc" Feb 27 16:40:24 crc kubenswrapper[4751]: I0227 16:40:24.026970 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="95d5d54c-def0-4b9e-b678-e708dec2ecc5" containerName="oc" Feb 27 16:40:24 crc kubenswrapper[4751]: I0227 16:40:24.027069 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="95d5d54c-def0-4b9e-b678-e708dec2ecc5" containerName="oc" Feb 27 16:40:24 crc kubenswrapper[4751]: I0227 16:40:24.028027 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7" Feb 27 16:40:24 crc kubenswrapper[4751]: I0227 16:40:24.030125 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Feb 27 16:40:24 crc kubenswrapper[4751]: I0227 16:40:24.072145 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7"] Feb 27 16:40:24 crc kubenswrapper[4751]: I0227 16:40:24.154652 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1a21af89-bee9-466f-af38-f9b30329134e-bundle\") pod \"d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7\" (UID: \"1a21af89-bee9-466f-af38-f9b30329134e\") " pod="openshift-marketplace/d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7" Feb 27 16:40:24 crc kubenswrapper[4751]: I0227 16:40:24.154726 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1a21af89-bee9-466f-af38-f9b30329134e-util\") pod \"d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7\" (UID: \"1a21af89-bee9-466f-af38-f9b30329134e\") " pod="openshift-marketplace/d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7" Feb 27 16:40:24 crc kubenswrapper[4751]: I0227 16:40:24.154786 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmprh\" (UniqueName: \"kubernetes.io/projected/1a21af89-bee9-466f-af38-f9b30329134e-kube-api-access-vmprh\") pod \"d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7\" (UID: \"1a21af89-bee9-466f-af38-f9b30329134e\") " pod="openshift-marketplace/d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7" Feb 27 16:40:24 crc kubenswrapper[4751]: I0227 16:40:24.256035 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmprh\" (UniqueName: \"kubernetes.io/projected/1a21af89-bee9-466f-af38-f9b30329134e-kube-api-access-vmprh\") pod \"d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7\" (UID: \"1a21af89-bee9-466f-af38-f9b30329134e\") " pod="openshift-marketplace/d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7" Feb 27 16:40:24 crc kubenswrapper[4751]: I0227 16:40:24.256153 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1a21af89-bee9-466f-af38-f9b30329134e-bundle\") pod \"d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7\" (UID: \"1a21af89-bee9-466f-af38-f9b30329134e\") " pod="openshift-marketplace/d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7" Feb 27 16:40:24 crc kubenswrapper[4751]: I0227 16:40:24.256203 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1a21af89-bee9-466f-af38-f9b30329134e-util\") pod \"d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7\" (UID: \"1a21af89-bee9-466f-af38-f9b30329134e\") " pod="openshift-marketplace/d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7" Feb 27 16:40:24 crc kubenswrapper[4751]: I0227 16:40:24.257034 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/1a21af89-bee9-466f-af38-f9b30329134e-bundle\") pod \"d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7\" (UID: \"1a21af89-bee9-466f-af38-f9b30329134e\") " pod="openshift-marketplace/d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7" Feb 27 16:40:24 crc kubenswrapper[4751]: I0227 16:40:24.259382 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1a21af89-bee9-466f-af38-f9b30329134e-util\") pod \"d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7\" (UID: \"1a21af89-bee9-466f-af38-f9b30329134e\") " pod="openshift-marketplace/d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7" Feb 27 16:40:24 crc kubenswrapper[4751]: I0227 16:40:24.282880 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmprh\" (UniqueName: \"kubernetes.io/projected/1a21af89-bee9-466f-af38-f9b30329134e-kube-api-access-vmprh\") pod \"d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7\" (UID: \"1a21af89-bee9-466f-af38-f9b30329134e\") " pod="openshift-marketplace/d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7" Feb 27 16:40:24 crc kubenswrapper[4751]: I0227 16:40:24.342529 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7" Feb 27 16:40:24 crc kubenswrapper[4751]: I0227 16:40:24.617546 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7"] Feb 27 16:40:24 crc kubenswrapper[4751]: W0227 16:40:24.623245 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1a21af89_bee9_466f_af38_f9b30329134e.slice/crio-715303464e8023af6e4872618ece6e31089cc0e8a04126ff7946e1298ed8f4f2 WatchSource:0}: Error finding container 715303464e8023af6e4872618ece6e31089cc0e8a04126ff7946e1298ed8f4f2: Status 404 returned error can't find the container with id 715303464e8023af6e4872618ece6e31089cc0e8a04126ff7946e1298ed8f4f2 Feb 27 16:40:25 crc kubenswrapper[4751]: I0227 16:40:25.595541 4751 generic.go:334] "Generic (PLEG): container finished" podID="1a21af89-bee9-466f-af38-f9b30329134e" containerID="89200b84f4f6867706034f11e0b31cb8900db8f46d8a0a5a8b9fe3ac42fef52e" exitCode=0 Feb 27 16:40:25 crc kubenswrapper[4751]: I0227 16:40:25.595639 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7" event={"ID":"1a21af89-bee9-466f-af38-f9b30329134e","Type":"ContainerDied","Data":"89200b84f4f6867706034f11e0b31cb8900db8f46d8a0a5a8b9fe3ac42fef52e"} Feb 27 16:40:25 crc kubenswrapper[4751]: I0227 16:40:25.595891 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7" event={"ID":"1a21af89-bee9-466f-af38-f9b30329134e","Type":"ContainerStarted","Data":"715303464e8023af6e4872618ece6e31089cc0e8a04126ff7946e1298ed8f4f2"} Feb 27 16:40:26 crc kubenswrapper[4751]: I0227 16:40:26.549308 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-hb87p" podUID="ee39d7ed-b569-4c34-8c19-a5f386c85b5c" containerName="console" containerID="cri-o://cc5388dd2c32ec38ef35a6a99977ea25c61827c028d0a1342a5aec8ae655c411" gracePeriod=15 Feb 27 16:40:27 crc 
kubenswrapper[4751]: I0227 16:40:27.088526 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-hb87p_ee39d7ed-b569-4c34-8c19-a5f386c85b5c/console/0.log" Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.088947 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.203923 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-console-config\") pod \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.204087 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-oauth-serving-cert\") pod \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.204136 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-console-oauth-config\") pod \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.204183 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-console-serving-cert\") pod \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.204282 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ppbrc\" (UniqueName: \"kubernetes.io/projected/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-kube-api-access-ppbrc\") pod \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.204323 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-service-ca\") pod \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.204365 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-trusted-ca-bundle\") pod \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\" (UID: \"ee39d7ed-b569-4c34-8c19-a5f386c85b5c\") " Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.205519 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "ee39d7ed-b569-4c34-8c19-a5f386c85b5c" (UID: "ee39d7ed-b569-4c34-8c19-a5f386c85b5c"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.205581 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-service-ca" (OuterVolumeSpecName: "service-ca") pod "ee39d7ed-b569-4c34-8c19-a5f386c85b5c" (UID: "ee39d7ed-b569-4c34-8c19-a5f386c85b5c"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.205762 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "ee39d7ed-b569-4c34-8c19-a5f386c85b5c" (UID: "ee39d7ed-b569-4c34-8c19-a5f386c85b5c"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.206162 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-console-config" (OuterVolumeSpecName: "console-config") pod "ee39d7ed-b569-4c34-8c19-a5f386c85b5c" (UID: "ee39d7ed-b569-4c34-8c19-a5f386c85b5c"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.214641 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-kube-api-access-ppbrc" (OuterVolumeSpecName: "kube-api-access-ppbrc") pod "ee39d7ed-b569-4c34-8c19-a5f386c85b5c" (UID: "ee39d7ed-b569-4c34-8c19-a5f386c85b5c"). InnerVolumeSpecName "kube-api-access-ppbrc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.215331 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "ee39d7ed-b569-4c34-8c19-a5f386c85b5c" (UID: "ee39d7ed-b569-4c34-8c19-a5f386c85b5c"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.215677 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "ee39d7ed-b569-4c34-8c19-a5f386c85b5c" (UID: "ee39d7ed-b569-4c34-8c19-a5f386c85b5c"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.306278 4751 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.306339 4751 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-console-oauth-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.306362 4751 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-console-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.306382 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ppbrc\" (UniqueName: \"kubernetes.io/projected/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-kube-api-access-ppbrc\") on node \"crc\" DevicePath \"\"" Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.306447 4751 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-service-ca\") on node \"crc\" DevicePath \"\"" Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.306468 4751 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.306487 4751 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ee39d7ed-b569-4c34-8c19-a5f386c85b5c-console-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.615307 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-hb87p_ee39d7ed-b569-4c34-8c19-a5f386c85b5c/console/0.log" Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.615390 4751 generic.go:334] "Generic (PLEG): container finished" podID="ee39d7ed-b569-4c34-8c19-a5f386c85b5c" containerID="cc5388dd2c32ec38ef35a6a99977ea25c61827c028d0a1342a5aec8ae655c411" exitCode=2 Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.615526 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-hb87p" Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.615544 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-hb87p" event={"ID":"ee39d7ed-b569-4c34-8c19-a5f386c85b5c","Type":"ContainerDied","Data":"cc5388dd2c32ec38ef35a6a99977ea25c61827c028d0a1342a5aec8ae655c411"} Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.615686 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-hb87p" event={"ID":"ee39d7ed-b569-4c34-8c19-a5f386c85b5c","Type":"ContainerDied","Data":"bb0394270f0749a331489903ff13a2df3a100679b1bb38008c539dd6b8118877"} Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.615719 4751 scope.go:117] "RemoveContainer" containerID="cc5388dd2c32ec38ef35a6a99977ea25c61827c028d0a1342a5aec8ae655c411" Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.618795 4751 generic.go:334] "Generic (PLEG): container finished" podID="1a21af89-bee9-466f-af38-f9b30329134e" containerID="3e567e21c011e2dde8e81bfce20696fe89feb1c9613ae85e59e84a2c8a111ea3" exitCode=0 Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.618853 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7" event={"ID":"1a21af89-bee9-466f-af38-f9b30329134e","Type":"ContainerDied","Data":"3e567e21c011e2dde8e81bfce20696fe89feb1c9613ae85e59e84a2c8a111ea3"} Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.660712 4751 scope.go:117] "RemoveContainer" containerID="cc5388dd2c32ec38ef35a6a99977ea25c61827c028d0a1342a5aec8ae655c411" Feb 27 16:40:27 crc kubenswrapper[4751]: E0227 16:40:27.661987 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc5388dd2c32ec38ef35a6a99977ea25c61827c028d0a1342a5aec8ae655c411\": container with ID starting with cc5388dd2c32ec38ef35a6a99977ea25c61827c028d0a1342a5aec8ae655c411 not found: ID does not exist" containerID="cc5388dd2c32ec38ef35a6a99977ea25c61827c028d0a1342a5aec8ae655c411" Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.662091 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc5388dd2c32ec38ef35a6a99977ea25c61827c028d0a1342a5aec8ae655c411"} err="failed to get container status \"cc5388dd2c32ec38ef35a6a99977ea25c61827c028d0a1342a5aec8ae655c411\": rpc error: code = NotFound desc = could not find container \"cc5388dd2c32ec38ef35a6a99977ea25c61827c028d0a1342a5aec8ae655c411\": container with ID starting with cc5388dd2c32ec38ef35a6a99977ea25c61827c028d0a1342a5aec8ae655c411 not found: ID does not exist" Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.698554 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-hb87p"] Feb 27 16:40:27 crc kubenswrapper[4751]: I0227 16:40:27.709907 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-hb87p"] Feb 27 16:40:28 crc kubenswrapper[4751]: I0227 16:40:28.534719 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee39d7ed-b569-4c34-8c19-a5f386c85b5c" path="/var/lib/kubelet/pods/ee39d7ed-b569-4c34-8c19-a5f386c85b5c/volumes" Feb 27 16:40:28 crc kubenswrapper[4751]: I0227 16:40:28.633898 4751 generic.go:334] "Generic (PLEG): container finished" podID="1a21af89-bee9-466f-af38-f9b30329134e" 
containerID="dc2f4ea3058edb6c20903934c30c09f848436296ff2a577e779f53868ca09b7a" exitCode=0 Feb 27 16:40:28 crc kubenswrapper[4751]: I0227 16:40:28.633969 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7" event={"ID":"1a21af89-bee9-466f-af38-f9b30329134e","Type":"ContainerDied","Data":"dc2f4ea3058edb6c20903934c30c09f848436296ff2a577e779f53868ca09b7a"} Feb 27 16:40:29 crc kubenswrapper[4751]: I0227 16:40:29.857226 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7" Feb 27 16:40:30 crc kubenswrapper[4751]: I0227 16:40:30.045798 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1a21af89-bee9-466f-af38-f9b30329134e-util\") pod \"1a21af89-bee9-466f-af38-f9b30329134e\" (UID: \"1a21af89-bee9-466f-af38-f9b30329134e\") " Feb 27 16:40:30 crc kubenswrapper[4751]: I0227 16:40:30.045980 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1a21af89-bee9-466f-af38-f9b30329134e-bundle\") pod \"1a21af89-bee9-466f-af38-f9b30329134e\" (UID: \"1a21af89-bee9-466f-af38-f9b30329134e\") " Feb 27 16:40:30 crc kubenswrapper[4751]: I0227 16:40:30.046042 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vmprh\" (UniqueName: \"kubernetes.io/projected/1a21af89-bee9-466f-af38-f9b30329134e-kube-api-access-vmprh\") pod \"1a21af89-bee9-466f-af38-f9b30329134e\" (UID: \"1a21af89-bee9-466f-af38-f9b30329134e\") " Feb 27 16:40:30 crc kubenswrapper[4751]: I0227 16:40:30.047118 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1a21af89-bee9-466f-af38-f9b30329134e-bundle" (OuterVolumeSpecName: "bundle") pod "1a21af89-bee9-466f-af38-f9b30329134e" (UID: "1a21af89-bee9-466f-af38-f9b30329134e"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:40:30 crc kubenswrapper[4751]: I0227 16:40:30.056121 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a21af89-bee9-466f-af38-f9b30329134e-kube-api-access-vmprh" (OuterVolumeSpecName: "kube-api-access-vmprh") pod "1a21af89-bee9-466f-af38-f9b30329134e" (UID: "1a21af89-bee9-466f-af38-f9b30329134e"). InnerVolumeSpecName "kube-api-access-vmprh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:40:30 crc kubenswrapper[4751]: I0227 16:40:30.059559 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1a21af89-bee9-466f-af38-f9b30329134e-util" (OuterVolumeSpecName: "util") pod "1a21af89-bee9-466f-af38-f9b30329134e" (UID: "1a21af89-bee9-466f-af38-f9b30329134e"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:40:30 crc kubenswrapper[4751]: I0227 16:40:30.147660 4751 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1a21af89-bee9-466f-af38-f9b30329134e-util\") on node \"crc\" DevicePath \"\"" Feb 27 16:40:30 crc kubenswrapper[4751]: I0227 16:40:30.147704 4751 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1a21af89-bee9-466f-af38-f9b30329134e-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:40:30 crc kubenswrapper[4751]: I0227 16:40:30.147719 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vmprh\" (UniqueName: \"kubernetes.io/projected/1a21af89-bee9-466f-af38-f9b30329134e-kube-api-access-vmprh\") on node \"crc\" DevicePath \"\"" Feb 27 16:40:30 crc kubenswrapper[4751]: I0227 16:40:30.647168 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7" event={"ID":"1a21af89-bee9-466f-af38-f9b30329134e","Type":"ContainerDied","Data":"715303464e8023af6e4872618ece6e31089cc0e8a04126ff7946e1298ed8f4f2"} Feb 27 16:40:30 crc kubenswrapper[4751]: I0227 16:40:30.647450 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="715303464e8023af6e4872618ece6e31089cc0e8a04126ff7946e1298ed8f4f2" Feb 27 16:40:30 crc kubenswrapper[4751]: I0227 16:40:30.647511 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7" Feb 27 16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.751462 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-b447877db-svmqs"] Feb 27 16:40:39 crc kubenswrapper[4751]: E0227 16:40:39.752103 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a21af89-bee9-466f-af38-f9b30329134e" containerName="pull" Feb 27 16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.752115 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a21af89-bee9-466f-af38-f9b30329134e" containerName="pull" Feb 27 16:40:39 crc kubenswrapper[4751]: E0227 16:40:39.752125 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a21af89-bee9-466f-af38-f9b30329134e" containerName="util" Feb 27 16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.752131 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a21af89-bee9-466f-af38-f9b30329134e" containerName="util" Feb 27 16:40:39 crc kubenswrapper[4751]: E0227 16:40:39.752140 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee39d7ed-b569-4c34-8c19-a5f386c85b5c" containerName="console" Feb 27 16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.752147 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee39d7ed-b569-4c34-8c19-a5f386c85b5c" containerName="console" Feb 27 16:40:39 crc kubenswrapper[4751]: E0227 16:40:39.752158 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a21af89-bee9-466f-af38-f9b30329134e" containerName="extract" Feb 27 16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.752163 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a21af89-bee9-466f-af38-f9b30329134e" containerName="extract" Feb 27 16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.752256 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a21af89-bee9-466f-af38-f9b30329134e" containerName="extract" Feb 27 
16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.752269 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee39d7ed-b569-4c34-8c19-a5f386c85b5c" containerName="console" Feb 27 16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.752683 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-b447877db-svmqs" Feb 27 16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.754818 4751 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Feb 27 16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.755064 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Feb 27 16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.755078 4751 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-jhtgn" Feb 27 16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.755223 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Feb 27 16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.756295 4751 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Feb 27 16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.766256 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-b447877db-svmqs"] Feb 27 16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.880875 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f91806a5-f679-477e-b1fe-9f35cbfda94d-apiservice-cert\") pod \"metallb-operator-controller-manager-b447877db-svmqs\" (UID: \"f91806a5-f679-477e-b1fe-9f35cbfda94d\") " pod="metallb-system/metallb-operator-controller-manager-b447877db-svmqs" Feb 27 16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.881134 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f91806a5-f679-477e-b1fe-9f35cbfda94d-webhook-cert\") pod \"metallb-operator-controller-manager-b447877db-svmqs\" (UID: \"f91806a5-f679-477e-b1fe-9f35cbfda94d\") " pod="metallb-system/metallb-operator-controller-manager-b447877db-svmqs" Feb 27 16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.881293 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bf2f\" (UniqueName: \"kubernetes.io/projected/f91806a5-f679-477e-b1fe-9f35cbfda94d-kube-api-access-8bf2f\") pod \"metallb-operator-controller-manager-b447877db-svmqs\" (UID: \"f91806a5-f679-477e-b1fe-9f35cbfda94d\") " pod="metallb-system/metallb-operator-controller-manager-b447877db-svmqs" Feb 27 16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.982216 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bf2f\" (UniqueName: \"kubernetes.io/projected/f91806a5-f679-477e-b1fe-9f35cbfda94d-kube-api-access-8bf2f\") pod \"metallb-operator-controller-manager-b447877db-svmqs\" (UID: \"f91806a5-f679-477e-b1fe-9f35cbfda94d\") " pod="metallb-system/metallb-operator-controller-manager-b447877db-svmqs" Feb 27 16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.982466 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/f91806a5-f679-477e-b1fe-9f35cbfda94d-apiservice-cert\") pod \"metallb-operator-controller-manager-b447877db-svmqs\" (UID: \"f91806a5-f679-477e-b1fe-9f35cbfda94d\") " pod="metallb-system/metallb-operator-controller-manager-b447877db-svmqs" Feb 27 16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.982608 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f91806a5-f679-477e-b1fe-9f35cbfda94d-webhook-cert\") pod \"metallb-operator-controller-manager-b447877db-svmqs\" (UID: \"f91806a5-f679-477e-b1fe-9f35cbfda94d\") " pod="metallb-system/metallb-operator-controller-manager-b447877db-svmqs" Feb 27 16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.991505 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f91806a5-f679-477e-b1fe-9f35cbfda94d-webhook-cert\") pod \"metallb-operator-controller-manager-b447877db-svmqs\" (UID: \"f91806a5-f679-477e-b1fe-9f35cbfda94d\") " pod="metallb-system/metallb-operator-controller-manager-b447877db-svmqs" Feb 27 16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.995427 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-58f8d77f4f-vvpvl"] Feb 27 16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.996106 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-58f8d77f4f-vvpvl" Feb 27 16:40:39 crc kubenswrapper[4751]: I0227 16:40:39.999021 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f91806a5-f679-477e-b1fe-9f35cbfda94d-apiservice-cert\") pod \"metallb-operator-controller-manager-b447877db-svmqs\" (UID: \"f91806a5-f679-477e-b1fe-9f35cbfda94d\") " pod="metallb-system/metallb-operator-controller-manager-b447877db-svmqs" Feb 27 16:40:40 crc kubenswrapper[4751]: I0227 16:40:40.001250 4751 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Feb 27 16:40:40 crc kubenswrapper[4751]: I0227 16:40:40.003812 4751 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Feb 27 16:40:40 crc kubenswrapper[4751]: I0227 16:40:40.003863 4751 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-9hw84" Feb 27 16:40:40 crc kubenswrapper[4751]: I0227 16:40:40.006043 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bf2f\" (UniqueName: \"kubernetes.io/projected/f91806a5-f679-477e-b1fe-9f35cbfda94d-kube-api-access-8bf2f\") pod \"metallb-operator-controller-manager-b447877db-svmqs\" (UID: \"f91806a5-f679-477e-b1fe-9f35cbfda94d\") " pod="metallb-system/metallb-operator-controller-manager-b447877db-svmqs" Feb 27 16:40:40 crc kubenswrapper[4751]: I0227 16:40:40.016522 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-58f8d77f4f-vvpvl"] Feb 27 16:40:40 crc kubenswrapper[4751]: I0227 16:40:40.067063 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-b447877db-svmqs" Feb 27 16:40:40 crc kubenswrapper[4751]: I0227 16:40:40.184136 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4fdw\" (UniqueName: \"kubernetes.io/projected/ac5ec3ef-d5a4-4a18-839b-820031c0c971-kube-api-access-l4fdw\") pod \"metallb-operator-webhook-server-58f8d77f4f-vvpvl\" (UID: \"ac5ec3ef-d5a4-4a18-839b-820031c0c971\") " pod="metallb-system/metallb-operator-webhook-server-58f8d77f4f-vvpvl" Feb 27 16:40:40 crc kubenswrapper[4751]: I0227 16:40:40.184200 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ac5ec3ef-d5a4-4a18-839b-820031c0c971-apiservice-cert\") pod \"metallb-operator-webhook-server-58f8d77f4f-vvpvl\" (UID: \"ac5ec3ef-d5a4-4a18-839b-820031c0c971\") " pod="metallb-system/metallb-operator-webhook-server-58f8d77f4f-vvpvl" Feb 27 16:40:40 crc kubenswrapper[4751]: I0227 16:40:40.184232 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ac5ec3ef-d5a4-4a18-839b-820031c0c971-webhook-cert\") pod \"metallb-operator-webhook-server-58f8d77f4f-vvpvl\" (UID: \"ac5ec3ef-d5a4-4a18-839b-820031c0c971\") " pod="metallb-system/metallb-operator-webhook-server-58f8d77f4f-vvpvl" Feb 27 16:40:40 crc kubenswrapper[4751]: I0227 16:40:40.285531 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4fdw\" (UniqueName: \"kubernetes.io/projected/ac5ec3ef-d5a4-4a18-839b-820031c0c971-kube-api-access-l4fdw\") pod \"metallb-operator-webhook-server-58f8d77f4f-vvpvl\" (UID: \"ac5ec3ef-d5a4-4a18-839b-820031c0c971\") " pod="metallb-system/metallb-operator-webhook-server-58f8d77f4f-vvpvl" Feb 27 16:40:40 crc kubenswrapper[4751]: I0227 16:40:40.285590 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ac5ec3ef-d5a4-4a18-839b-820031c0c971-apiservice-cert\") pod \"metallb-operator-webhook-server-58f8d77f4f-vvpvl\" (UID: \"ac5ec3ef-d5a4-4a18-839b-820031c0c971\") " pod="metallb-system/metallb-operator-webhook-server-58f8d77f4f-vvpvl" Feb 27 16:40:40 crc kubenswrapper[4751]: I0227 16:40:40.285622 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ac5ec3ef-d5a4-4a18-839b-820031c0c971-webhook-cert\") pod \"metallb-operator-webhook-server-58f8d77f4f-vvpvl\" (UID: \"ac5ec3ef-d5a4-4a18-839b-820031c0c971\") " pod="metallb-system/metallb-operator-webhook-server-58f8d77f4f-vvpvl" Feb 27 16:40:40 crc kubenswrapper[4751]: I0227 16:40:40.288974 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ac5ec3ef-d5a4-4a18-839b-820031c0c971-apiservice-cert\") pod \"metallb-operator-webhook-server-58f8d77f4f-vvpvl\" (UID: \"ac5ec3ef-d5a4-4a18-839b-820031c0c971\") " pod="metallb-system/metallb-operator-webhook-server-58f8d77f4f-vvpvl" Feb 27 16:40:40 crc kubenswrapper[4751]: I0227 16:40:40.289454 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ac5ec3ef-d5a4-4a18-839b-820031c0c971-webhook-cert\") pod \"metallb-operator-webhook-server-58f8d77f4f-vvpvl\" (UID: \"ac5ec3ef-d5a4-4a18-839b-820031c0c971\") " 
pod="metallb-system/metallb-operator-webhook-server-58f8d77f4f-vvpvl" Feb 27 16:40:40 crc kubenswrapper[4751]: I0227 16:40:40.302043 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4fdw\" (UniqueName: \"kubernetes.io/projected/ac5ec3ef-d5a4-4a18-839b-820031c0c971-kube-api-access-l4fdw\") pod \"metallb-operator-webhook-server-58f8d77f4f-vvpvl\" (UID: \"ac5ec3ef-d5a4-4a18-839b-820031c0c971\") " pod="metallb-system/metallb-operator-webhook-server-58f8d77f4f-vvpvl" Feb 27 16:40:40 crc kubenswrapper[4751]: W0227 16:40:40.348752 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf91806a5_f679_477e_b1fe_9f35cbfda94d.slice/crio-11f5a9fec407ddf450b9818db4681a5e1f92d53d256378ee1ee2add5710d7c3b WatchSource:0}: Error finding container 11f5a9fec407ddf450b9818db4681a5e1f92d53d256378ee1ee2add5710d7c3b: Status 404 returned error can't find the container with id 11f5a9fec407ddf450b9818db4681a5e1f92d53d256378ee1ee2add5710d7c3b Feb 27 16:40:40 crc kubenswrapper[4751]: I0227 16:40:40.354672 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-58f8d77f4f-vvpvl" Feb 27 16:40:40 crc kubenswrapper[4751]: I0227 16:40:40.361480 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-b447877db-svmqs"] Feb 27 16:40:40 crc kubenswrapper[4751]: I0227 16:40:40.544750 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-58f8d77f4f-vvpvl"] Feb 27 16:40:40 crc kubenswrapper[4751]: W0227 16:40:40.556229 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podac5ec3ef_d5a4_4a18_839b_820031c0c971.slice/crio-28d5dd15eca1c690d547a978f19e04c108a729d5be3bce73d29c441db5bd3317 WatchSource:0}: Error finding container 28d5dd15eca1c690d547a978f19e04c108a729d5be3bce73d29c441db5bd3317: Status 404 returned error can't find the container with id 28d5dd15eca1c690d547a978f19e04c108a729d5be3bce73d29c441db5bd3317 Feb 27 16:40:40 crc kubenswrapper[4751]: I0227 16:40:40.712439 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-58f8d77f4f-vvpvl" event={"ID":"ac5ec3ef-d5a4-4a18-839b-820031c0c971","Type":"ContainerStarted","Data":"28d5dd15eca1c690d547a978f19e04c108a729d5be3bce73d29c441db5bd3317"} Feb 27 16:40:40 crc kubenswrapper[4751]: I0227 16:40:40.713585 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-b447877db-svmqs" event={"ID":"f91806a5-f679-477e-b1fe-9f35cbfda94d","Type":"ContainerStarted","Data":"11f5a9fec407ddf450b9818db4681a5e1f92d53d256378ee1ee2add5710d7c3b"} Feb 27 16:40:49 crc kubenswrapper[4751]: I0227 16:40:49.548777 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-l4bcd"] Feb 27 16:40:49 crc kubenswrapper[4751]: I0227 16:40:49.550752 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-l4bcd" Feb 27 16:40:49 crc kubenswrapper[4751]: I0227 16:40:49.597301 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l4bcd"] Feb 27 16:40:49 crc kubenswrapper[4751]: I0227 16:40:49.598413 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4q4vz\" (UniqueName: \"kubernetes.io/projected/9c1fec5d-77c6-42e3-8117-238b615456bb-kube-api-access-4q4vz\") pod \"certified-operators-l4bcd\" (UID: \"9c1fec5d-77c6-42e3-8117-238b615456bb\") " pod="openshift-marketplace/certified-operators-l4bcd" Feb 27 16:40:49 crc kubenswrapper[4751]: I0227 16:40:49.598451 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c1fec5d-77c6-42e3-8117-238b615456bb-utilities\") pod \"certified-operators-l4bcd\" (UID: \"9c1fec5d-77c6-42e3-8117-238b615456bb\") " pod="openshift-marketplace/certified-operators-l4bcd" Feb 27 16:40:49 crc kubenswrapper[4751]: I0227 16:40:49.598491 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c1fec5d-77c6-42e3-8117-238b615456bb-catalog-content\") pod \"certified-operators-l4bcd\" (UID: \"9c1fec5d-77c6-42e3-8117-238b615456bb\") " pod="openshift-marketplace/certified-operators-l4bcd" Feb 27 16:40:49 crc kubenswrapper[4751]: I0227 16:40:49.699193 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c1fec5d-77c6-42e3-8117-238b615456bb-catalog-content\") pod \"certified-operators-l4bcd\" (UID: \"9c1fec5d-77c6-42e3-8117-238b615456bb\") " pod="openshift-marketplace/certified-operators-l4bcd" Feb 27 16:40:49 crc kubenswrapper[4751]: I0227 16:40:49.699301 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4q4vz\" (UniqueName: \"kubernetes.io/projected/9c1fec5d-77c6-42e3-8117-238b615456bb-kube-api-access-4q4vz\") pod \"certified-operators-l4bcd\" (UID: \"9c1fec5d-77c6-42e3-8117-238b615456bb\") " pod="openshift-marketplace/certified-operators-l4bcd" Feb 27 16:40:49 crc kubenswrapper[4751]: I0227 16:40:49.699323 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c1fec5d-77c6-42e3-8117-238b615456bb-utilities\") pod \"certified-operators-l4bcd\" (UID: \"9c1fec5d-77c6-42e3-8117-238b615456bb\") " pod="openshift-marketplace/certified-operators-l4bcd" Feb 27 16:40:49 crc kubenswrapper[4751]: I0227 16:40:49.699806 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c1fec5d-77c6-42e3-8117-238b615456bb-utilities\") pod \"certified-operators-l4bcd\" (UID: \"9c1fec5d-77c6-42e3-8117-238b615456bb\") " pod="openshift-marketplace/certified-operators-l4bcd" Feb 27 16:40:49 crc kubenswrapper[4751]: I0227 16:40:49.700086 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c1fec5d-77c6-42e3-8117-238b615456bb-catalog-content\") pod \"certified-operators-l4bcd\" (UID: \"9c1fec5d-77c6-42e3-8117-238b615456bb\") " pod="openshift-marketplace/certified-operators-l4bcd" Feb 27 16:40:49 crc kubenswrapper[4751]: I0227 16:40:49.723056 4751 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-4q4vz\" (UniqueName: \"kubernetes.io/projected/9c1fec5d-77c6-42e3-8117-238b615456bb-kube-api-access-4q4vz\") pod \"certified-operators-l4bcd\" (UID: \"9c1fec5d-77c6-42e3-8117-238b615456bb\") " pod="openshift-marketplace/certified-operators-l4bcd" Feb 27 16:40:49 crc kubenswrapper[4751]: I0227 16:40:49.875696 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l4bcd" Feb 27 16:40:51 crc kubenswrapper[4751]: W0227 16:40:51.964384 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9c1fec5d_77c6_42e3_8117_238b615456bb.slice/crio-dfc151f66d21e5d3b78912e66f695ee4526059d3891b8029633f61b2ba9aac59 WatchSource:0}: Error finding container dfc151f66d21e5d3b78912e66f695ee4526059d3891b8029633f61b2ba9aac59: Status 404 returned error can't find the container with id dfc151f66d21e5d3b78912e66f695ee4526059d3891b8029633f61b2ba9aac59 Feb 27 16:40:51 crc kubenswrapper[4751]: I0227 16:40:51.967269 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l4bcd"] Feb 27 16:40:52 crc kubenswrapper[4751]: I0227 16:40:52.796308 4751 generic.go:334] "Generic (PLEG): container finished" podID="9c1fec5d-77c6-42e3-8117-238b615456bb" containerID="77c04a3c97a036263ea3c2e418059e7b14875ff45d8d01ff298e9f9ba2a11b19" exitCode=0 Feb 27 16:40:52 crc kubenswrapper[4751]: I0227 16:40:52.796486 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l4bcd" event={"ID":"9c1fec5d-77c6-42e3-8117-238b615456bb","Type":"ContainerDied","Data":"77c04a3c97a036263ea3c2e418059e7b14875ff45d8d01ff298e9f9ba2a11b19"} Feb 27 16:40:52 crc kubenswrapper[4751]: I0227 16:40:52.796563 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l4bcd" event={"ID":"9c1fec5d-77c6-42e3-8117-238b615456bb","Type":"ContainerStarted","Data":"dfc151f66d21e5d3b78912e66f695ee4526059d3891b8029633f61b2ba9aac59"} Feb 27 16:40:52 crc kubenswrapper[4751]: I0227 16:40:52.799325 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-58f8d77f4f-vvpvl" event={"ID":"ac5ec3ef-d5a4-4a18-839b-820031c0c971","Type":"ContainerStarted","Data":"c6f33fca7b4fcda6be31e32f6799de8462b02cb987001d8178f42a82c7e27926"} Feb 27 16:40:52 crc kubenswrapper[4751]: I0227 16:40:52.799480 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-58f8d77f4f-vvpvl" Feb 27 16:40:52 crc kubenswrapper[4751]: I0227 16:40:52.801886 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-b447877db-svmqs" event={"ID":"f91806a5-f679-477e-b1fe-9f35cbfda94d","Type":"ContainerStarted","Data":"452ddd931cfee8cf82151af25dc46992344d150ce1370b3acffe62a5a96c2840"} Feb 27 16:40:52 crc kubenswrapper[4751]: I0227 16:40:52.802069 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-b447877db-svmqs" Feb 27 16:40:52 crc kubenswrapper[4751]: I0227 16:40:52.857626 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-58f8d77f4f-vvpvl" podStartSLOduration=2.8802455890000003 podStartE2EDuration="13.857609996s" podCreationTimestamp="2026-02-27 16:40:39 +0000 UTC" 
firstStartedPulling="2026-02-27 16:40:40.559888308 +0000 UTC m=+1002.706902755" lastFinishedPulling="2026-02-27 16:40:51.537252705 +0000 UTC m=+1013.684267162" observedRunningTime="2026-02-27 16:40:52.856911698 +0000 UTC m=+1015.003926155" watchObservedRunningTime="2026-02-27 16:40:52.857609996 +0000 UTC m=+1015.004624443" Feb 27 16:40:52 crc kubenswrapper[4751]: I0227 16:40:52.924150 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-b447877db-svmqs" podStartSLOduration=2.7652666139999997 podStartE2EDuration="13.924132254s" podCreationTimestamp="2026-02-27 16:40:39 +0000 UTC" firstStartedPulling="2026-02-27 16:40:40.353421452 +0000 UTC m=+1002.500435899" lastFinishedPulling="2026-02-27 16:40:51.512287092 +0000 UTC m=+1013.659301539" observedRunningTime="2026-02-27 16:40:52.920715373 +0000 UTC m=+1015.067729830" watchObservedRunningTime="2026-02-27 16:40:52.924132254 +0000 UTC m=+1015.071146701" Feb 27 16:40:57 crc kubenswrapper[4751]: I0227 16:40:57.155715 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qzv64"] Feb 27 16:40:57 crc kubenswrapper[4751]: I0227 16:40:57.157256 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qzv64" Feb 27 16:40:57 crc kubenswrapper[4751]: I0227 16:40:57.173070 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qzv64"] Feb 27 16:40:57 crc kubenswrapper[4751]: I0227 16:40:57.298873 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47c88bfe-cd2c-45ef-8c32-a5e79176903b-catalog-content\") pod \"redhat-marketplace-qzv64\" (UID: \"47c88bfe-cd2c-45ef-8c32-a5e79176903b\") " pod="openshift-marketplace/redhat-marketplace-qzv64" Feb 27 16:40:57 crc kubenswrapper[4751]: I0227 16:40:57.299044 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2b4mc\" (UniqueName: \"kubernetes.io/projected/47c88bfe-cd2c-45ef-8c32-a5e79176903b-kube-api-access-2b4mc\") pod \"redhat-marketplace-qzv64\" (UID: \"47c88bfe-cd2c-45ef-8c32-a5e79176903b\") " pod="openshift-marketplace/redhat-marketplace-qzv64" Feb 27 16:40:57 crc kubenswrapper[4751]: I0227 16:40:57.299265 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47c88bfe-cd2c-45ef-8c32-a5e79176903b-utilities\") pod \"redhat-marketplace-qzv64\" (UID: \"47c88bfe-cd2c-45ef-8c32-a5e79176903b\") " pod="openshift-marketplace/redhat-marketplace-qzv64" Feb 27 16:40:57 crc kubenswrapper[4751]: I0227 16:40:57.400339 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2b4mc\" (UniqueName: \"kubernetes.io/projected/47c88bfe-cd2c-45ef-8c32-a5e79176903b-kube-api-access-2b4mc\") pod \"redhat-marketplace-qzv64\" (UID: \"47c88bfe-cd2c-45ef-8c32-a5e79176903b\") " pod="openshift-marketplace/redhat-marketplace-qzv64" Feb 27 16:40:57 crc kubenswrapper[4751]: I0227 16:40:57.400429 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47c88bfe-cd2c-45ef-8c32-a5e79176903b-utilities\") pod \"redhat-marketplace-qzv64\" (UID: \"47c88bfe-cd2c-45ef-8c32-a5e79176903b\") " pod="openshift-marketplace/redhat-marketplace-qzv64" 
Feb 27 16:40:57 crc kubenswrapper[4751]: I0227 16:40:57.400475 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47c88bfe-cd2c-45ef-8c32-a5e79176903b-catalog-content\") pod \"redhat-marketplace-qzv64\" (UID: \"47c88bfe-cd2c-45ef-8c32-a5e79176903b\") " pod="openshift-marketplace/redhat-marketplace-qzv64" Feb 27 16:40:57 crc kubenswrapper[4751]: I0227 16:40:57.400917 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47c88bfe-cd2c-45ef-8c32-a5e79176903b-catalog-content\") pod \"redhat-marketplace-qzv64\" (UID: \"47c88bfe-cd2c-45ef-8c32-a5e79176903b\") " pod="openshift-marketplace/redhat-marketplace-qzv64" Feb 27 16:40:57 crc kubenswrapper[4751]: I0227 16:40:57.401065 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47c88bfe-cd2c-45ef-8c32-a5e79176903b-utilities\") pod \"redhat-marketplace-qzv64\" (UID: \"47c88bfe-cd2c-45ef-8c32-a5e79176903b\") " pod="openshift-marketplace/redhat-marketplace-qzv64" Feb 27 16:40:57 crc kubenswrapper[4751]: I0227 16:40:57.423278 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2b4mc\" (UniqueName: \"kubernetes.io/projected/47c88bfe-cd2c-45ef-8c32-a5e79176903b-kube-api-access-2b4mc\") pod \"redhat-marketplace-qzv64\" (UID: \"47c88bfe-cd2c-45ef-8c32-a5e79176903b\") " pod="openshift-marketplace/redhat-marketplace-qzv64" Feb 27 16:40:57 crc kubenswrapper[4751]: I0227 16:40:57.471253 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qzv64" Feb 27 16:40:57 crc kubenswrapper[4751]: I0227 16:40:57.791869 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qzv64"] Feb 27 16:40:57 crc kubenswrapper[4751]: I0227 16:40:57.838451 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qzv64" event={"ID":"47c88bfe-cd2c-45ef-8c32-a5e79176903b","Type":"ContainerStarted","Data":"8e99e4eef5fc01cf45db3e7bff28efa3cf2417991bc3fdeb1b7cd6952ff4da6d"} Feb 27 16:40:57 crc kubenswrapper[4751]: I0227 16:40:57.841214 4751 generic.go:334] "Generic (PLEG): container finished" podID="9c1fec5d-77c6-42e3-8117-238b615456bb" containerID="c3d3d57f47c6de85f3e862902fa25187dd88b52f7505645ae76c8eea9958c203" exitCode=0 Feb 27 16:40:57 crc kubenswrapper[4751]: I0227 16:40:57.841253 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l4bcd" event={"ID":"9c1fec5d-77c6-42e3-8117-238b615456bb","Type":"ContainerDied","Data":"c3d3d57f47c6de85f3e862902fa25187dd88b52f7505645ae76c8eea9958c203"} Feb 27 16:41:00 crc kubenswrapper[4751]: I0227 16:41:00.862084 4751 generic.go:334] "Generic (PLEG): container finished" podID="47c88bfe-cd2c-45ef-8c32-a5e79176903b" containerID="a9af31915d14db934b66ef2c3f7e6278e23e624b4f2995d40425f416d3d0383a" exitCode=0 Feb 27 16:41:00 crc kubenswrapper[4751]: I0227 16:41:00.862174 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qzv64" event={"ID":"47c88bfe-cd2c-45ef-8c32-a5e79176903b","Type":"ContainerDied","Data":"a9af31915d14db934b66ef2c3f7e6278e23e624b4f2995d40425f416d3d0383a"} Feb 27 16:41:02 crc kubenswrapper[4751]: I0227 16:41:02.876807 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-l4bcd" event={"ID":"9c1fec5d-77c6-42e3-8117-238b615456bb","Type":"ContainerStarted","Data":"d7db9b609986e442954b9740326514de6cc67854ca6f7c8bbd62b4fa14757531"} Feb 27 16:41:06 crc kubenswrapper[4751]: I0227 16:41:06.535594 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-l4bcd" podStartSLOduration=8.565818108 podStartE2EDuration="17.535565835s" podCreationTimestamp="2026-02-27 16:40:49 +0000 UTC" firstStartedPulling="2026-02-27 16:40:52.798378122 +0000 UTC m=+1014.945392599" lastFinishedPulling="2026-02-27 16:41:01.768125879 +0000 UTC m=+1023.915140326" observedRunningTime="2026-02-27 16:41:02.903802513 +0000 UTC m=+1025.050816960" watchObservedRunningTime="2026-02-27 16:41:06.535565835 +0000 UTC m=+1028.682580302" Feb 27 16:41:06 crc kubenswrapper[4751]: I0227 16:41:06.537268 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wl6x8"] Feb 27 16:41:06 crc kubenswrapper[4751]: I0227 16:41:06.539102 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wl6x8" Feb 27 16:41:06 crc kubenswrapper[4751]: I0227 16:41:06.548000 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wl6x8"] Feb 27 16:41:06 crc kubenswrapper[4751]: I0227 16:41:06.664104 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72be77d0-4306-4d1c-8397-d91b9f0a4bbb-utilities\") pod \"community-operators-wl6x8\" (UID: \"72be77d0-4306-4d1c-8397-d91b9f0a4bbb\") " pod="openshift-marketplace/community-operators-wl6x8" Feb 27 16:41:06 crc kubenswrapper[4751]: I0227 16:41:06.664238 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72be77d0-4306-4d1c-8397-d91b9f0a4bbb-catalog-content\") pod \"community-operators-wl6x8\" (UID: \"72be77d0-4306-4d1c-8397-d91b9f0a4bbb\") " pod="openshift-marketplace/community-operators-wl6x8" Feb 27 16:41:06 crc kubenswrapper[4751]: I0227 16:41:06.664283 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spm56\" (UniqueName: \"kubernetes.io/projected/72be77d0-4306-4d1c-8397-d91b9f0a4bbb-kube-api-access-spm56\") pod \"community-operators-wl6x8\" (UID: \"72be77d0-4306-4d1c-8397-d91b9f0a4bbb\") " pod="openshift-marketplace/community-operators-wl6x8" Feb 27 16:41:06 crc kubenswrapper[4751]: I0227 16:41:06.764822 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72be77d0-4306-4d1c-8397-d91b9f0a4bbb-utilities\") pod \"community-operators-wl6x8\" (UID: \"72be77d0-4306-4d1c-8397-d91b9f0a4bbb\") " pod="openshift-marketplace/community-operators-wl6x8" Feb 27 16:41:06 crc kubenswrapper[4751]: I0227 16:41:06.764884 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72be77d0-4306-4d1c-8397-d91b9f0a4bbb-catalog-content\") pod \"community-operators-wl6x8\" (UID: \"72be77d0-4306-4d1c-8397-d91b9f0a4bbb\") " pod="openshift-marketplace/community-operators-wl6x8" Feb 27 16:41:06 crc kubenswrapper[4751]: I0227 16:41:06.764907 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-spm56\" (UniqueName: \"kubernetes.io/projected/72be77d0-4306-4d1c-8397-d91b9f0a4bbb-kube-api-access-spm56\") pod \"community-operators-wl6x8\" (UID: \"72be77d0-4306-4d1c-8397-d91b9f0a4bbb\") " pod="openshift-marketplace/community-operators-wl6x8" Feb 27 16:41:06 crc kubenswrapper[4751]: I0227 16:41:06.765333 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72be77d0-4306-4d1c-8397-d91b9f0a4bbb-utilities\") pod \"community-operators-wl6x8\" (UID: \"72be77d0-4306-4d1c-8397-d91b9f0a4bbb\") " pod="openshift-marketplace/community-operators-wl6x8" Feb 27 16:41:06 crc kubenswrapper[4751]: I0227 16:41:06.765626 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72be77d0-4306-4d1c-8397-d91b9f0a4bbb-catalog-content\") pod \"community-operators-wl6x8\" (UID: \"72be77d0-4306-4d1c-8397-d91b9f0a4bbb\") " pod="openshift-marketplace/community-operators-wl6x8" Feb 27 16:41:06 crc kubenswrapper[4751]: I0227 16:41:06.798609 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spm56\" (UniqueName: \"kubernetes.io/projected/72be77d0-4306-4d1c-8397-d91b9f0a4bbb-kube-api-access-spm56\") pod \"community-operators-wl6x8\" (UID: \"72be77d0-4306-4d1c-8397-d91b9f0a4bbb\") " pod="openshift-marketplace/community-operators-wl6x8" Feb 27 16:41:06 crc kubenswrapper[4751]: I0227 16:41:06.859797 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wl6x8" Feb 27 16:41:09 crc kubenswrapper[4751]: I0227 16:41:09.876374 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-l4bcd" Feb 27 16:41:09 crc kubenswrapper[4751]: I0227 16:41:09.877133 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-l4bcd" Feb 27 16:41:09 crc kubenswrapper[4751]: I0227 16:41:09.944672 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-l4bcd" Feb 27 16:41:09 crc kubenswrapper[4751]: I0227 16:41:09.989559 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-l4bcd" Feb 27 16:41:10 crc kubenswrapper[4751]: I0227 16:41:10.185566 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l4bcd"] Feb 27 16:41:10 crc kubenswrapper[4751]: I0227 16:41:10.363351 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-58f8d77f4f-vvpvl" Feb 27 16:41:11 crc kubenswrapper[4751]: I0227 16:41:11.742635 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wl6x8"] Feb 27 16:41:11 crc kubenswrapper[4751]: W0227 16:41:11.780511 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod72be77d0_4306_4d1c_8397_d91b9f0a4bbb.slice/crio-ead8aed9228f0b5c4690089acb9ead8d7087a257fd1a1c0ca579524798230d0e WatchSource:0}: Error finding container ead8aed9228f0b5c4690089acb9ead8d7087a257fd1a1c0ca579524798230d0e: Status 404 returned error can't find the container with id ead8aed9228f0b5c4690089acb9ead8d7087a257fd1a1c0ca579524798230d0e Feb 27 16:41:11 crc kubenswrapper[4751]: I0227 16:41:11.932692 4751 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wl6x8" event={"ID":"72be77d0-4306-4d1c-8397-d91b9f0a4bbb","Type":"ContainerStarted","Data":"ead8aed9228f0b5c4690089acb9ead8d7087a257fd1a1c0ca579524798230d0e"} Feb 27 16:41:11 crc kubenswrapper[4751]: I0227 16:41:11.935726 4751 generic.go:334] "Generic (PLEG): container finished" podID="47c88bfe-cd2c-45ef-8c32-a5e79176903b" containerID="8ec80888324b7d1de51b67d7122719692db1c731c9bc3b99823cf321f313fee5" exitCode=0 Feb 27 16:41:11 crc kubenswrapper[4751]: I0227 16:41:11.935784 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qzv64" event={"ID":"47c88bfe-cd2c-45ef-8c32-a5e79176903b","Type":"ContainerDied","Data":"8ec80888324b7d1de51b67d7122719692db1c731c9bc3b99823cf321f313fee5"} Feb 27 16:41:11 crc kubenswrapper[4751]: I0227 16:41:11.936131 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-l4bcd" podUID="9c1fec5d-77c6-42e3-8117-238b615456bb" containerName="registry-server" containerID="cri-o://d7db9b609986e442954b9740326514de6cc67854ca6f7c8bbd62b4fa14757531" gracePeriod=2 Feb 27 16:41:12 crc kubenswrapper[4751]: I0227 16:41:12.940351 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l4bcd" Feb 27 16:41:12 crc kubenswrapper[4751]: I0227 16:41:12.943597 4751 generic.go:334] "Generic (PLEG): container finished" podID="72be77d0-4306-4d1c-8397-d91b9f0a4bbb" containerID="e61283898e66123cab092c6337bd4ca47f6a95f31a525a888098e88e2f998c4b" exitCode=0 Feb 27 16:41:12 crc kubenswrapper[4751]: I0227 16:41:12.943662 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wl6x8" event={"ID":"72be77d0-4306-4d1c-8397-d91b9f0a4bbb","Type":"ContainerDied","Data":"e61283898e66123cab092c6337bd4ca47f6a95f31a525a888098e88e2f998c4b"} Feb 27 16:41:12 crc kubenswrapper[4751]: I0227 16:41:12.945872 4751 generic.go:334] "Generic (PLEG): container finished" podID="9c1fec5d-77c6-42e3-8117-238b615456bb" containerID="d7db9b609986e442954b9740326514de6cc67854ca6f7c8bbd62b4fa14757531" exitCode=0 Feb 27 16:41:12 crc kubenswrapper[4751]: I0227 16:41:12.945906 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l4bcd" event={"ID":"9c1fec5d-77c6-42e3-8117-238b615456bb","Type":"ContainerDied","Data":"d7db9b609986e442954b9740326514de6cc67854ca6f7c8bbd62b4fa14757531"} Feb 27 16:41:12 crc kubenswrapper[4751]: I0227 16:41:12.945932 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l4bcd" event={"ID":"9c1fec5d-77c6-42e3-8117-238b615456bb","Type":"ContainerDied","Data":"dfc151f66d21e5d3b78912e66f695ee4526059d3891b8029633f61b2ba9aac59"} Feb 27 16:41:12 crc kubenswrapper[4751]: I0227 16:41:12.945948 4751 scope.go:117] "RemoveContainer" containerID="d7db9b609986e442954b9740326514de6cc67854ca6f7c8bbd62b4fa14757531" Feb 27 16:41:12 crc kubenswrapper[4751]: I0227 16:41:12.946058 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-l4bcd" Feb 27 16:41:12 crc kubenswrapper[4751]: I0227 16:41:12.969430 4751 scope.go:117] "RemoveContainer" containerID="c3d3d57f47c6de85f3e862902fa25187dd88b52f7505645ae76c8eea9958c203" Feb 27 16:41:12 crc kubenswrapper[4751]: I0227 16:41:12.996915 4751 scope.go:117] "RemoveContainer" containerID="77c04a3c97a036263ea3c2e418059e7b14875ff45d8d01ff298e9f9ba2a11b19" Feb 27 16:41:13 crc kubenswrapper[4751]: I0227 16:41:13.014709 4751 scope.go:117] "RemoveContainer" containerID="d7db9b609986e442954b9740326514de6cc67854ca6f7c8bbd62b4fa14757531" Feb 27 16:41:13 crc kubenswrapper[4751]: E0227 16:41:13.015612 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7db9b609986e442954b9740326514de6cc67854ca6f7c8bbd62b4fa14757531\": container with ID starting with d7db9b609986e442954b9740326514de6cc67854ca6f7c8bbd62b4fa14757531 not found: ID does not exist" containerID="d7db9b609986e442954b9740326514de6cc67854ca6f7c8bbd62b4fa14757531" Feb 27 16:41:13 crc kubenswrapper[4751]: I0227 16:41:13.015640 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7db9b609986e442954b9740326514de6cc67854ca6f7c8bbd62b4fa14757531"} err="failed to get container status \"d7db9b609986e442954b9740326514de6cc67854ca6f7c8bbd62b4fa14757531\": rpc error: code = NotFound desc = could not find container \"d7db9b609986e442954b9740326514de6cc67854ca6f7c8bbd62b4fa14757531\": container with ID starting with d7db9b609986e442954b9740326514de6cc67854ca6f7c8bbd62b4fa14757531 not found: ID does not exist" Feb 27 16:41:13 crc kubenswrapper[4751]: I0227 16:41:13.015661 4751 scope.go:117] "RemoveContainer" containerID="c3d3d57f47c6de85f3e862902fa25187dd88b52f7505645ae76c8eea9958c203" Feb 27 16:41:13 crc kubenswrapper[4751]: E0227 16:41:13.015928 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c3d3d57f47c6de85f3e862902fa25187dd88b52f7505645ae76c8eea9958c203\": container with ID starting with c3d3d57f47c6de85f3e862902fa25187dd88b52f7505645ae76c8eea9958c203 not found: ID does not exist" containerID="c3d3d57f47c6de85f3e862902fa25187dd88b52f7505645ae76c8eea9958c203" Feb 27 16:41:13 crc kubenswrapper[4751]: I0227 16:41:13.015970 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3d3d57f47c6de85f3e862902fa25187dd88b52f7505645ae76c8eea9958c203"} err="failed to get container status \"c3d3d57f47c6de85f3e862902fa25187dd88b52f7505645ae76c8eea9958c203\": rpc error: code = NotFound desc = could not find container \"c3d3d57f47c6de85f3e862902fa25187dd88b52f7505645ae76c8eea9958c203\": container with ID starting with c3d3d57f47c6de85f3e862902fa25187dd88b52f7505645ae76c8eea9958c203 not found: ID does not exist" Feb 27 16:41:13 crc kubenswrapper[4751]: I0227 16:41:13.015992 4751 scope.go:117] "RemoveContainer" containerID="77c04a3c97a036263ea3c2e418059e7b14875ff45d8d01ff298e9f9ba2a11b19" Feb 27 16:41:13 crc kubenswrapper[4751]: E0227 16:41:13.016218 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"77c04a3c97a036263ea3c2e418059e7b14875ff45d8d01ff298e9f9ba2a11b19\": container with ID starting with 77c04a3c97a036263ea3c2e418059e7b14875ff45d8d01ff298e9f9ba2a11b19 not found: ID does not exist" containerID="77c04a3c97a036263ea3c2e418059e7b14875ff45d8d01ff298e9f9ba2a11b19" 
Feb 27 16:41:13 crc kubenswrapper[4751]: I0227 16:41:13.016233 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"77c04a3c97a036263ea3c2e418059e7b14875ff45d8d01ff298e9f9ba2a11b19"} err="failed to get container status \"77c04a3c97a036263ea3c2e418059e7b14875ff45d8d01ff298e9f9ba2a11b19\": rpc error: code = NotFound desc = could not find container \"77c04a3c97a036263ea3c2e418059e7b14875ff45d8d01ff298e9f9ba2a11b19\": container with ID starting with 77c04a3c97a036263ea3c2e418059e7b14875ff45d8d01ff298e9f9ba2a11b19 not found: ID does not exist" Feb 27 16:41:13 crc kubenswrapper[4751]: I0227 16:41:13.065757 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c1fec5d-77c6-42e3-8117-238b615456bb-utilities\") pod \"9c1fec5d-77c6-42e3-8117-238b615456bb\" (UID: \"9c1fec5d-77c6-42e3-8117-238b615456bb\") " Feb 27 16:41:13 crc kubenswrapper[4751]: I0227 16:41:13.065818 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c1fec5d-77c6-42e3-8117-238b615456bb-catalog-content\") pod \"9c1fec5d-77c6-42e3-8117-238b615456bb\" (UID: \"9c1fec5d-77c6-42e3-8117-238b615456bb\") " Feb 27 16:41:13 crc kubenswrapper[4751]: I0227 16:41:13.065842 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4q4vz\" (UniqueName: \"kubernetes.io/projected/9c1fec5d-77c6-42e3-8117-238b615456bb-kube-api-access-4q4vz\") pod \"9c1fec5d-77c6-42e3-8117-238b615456bb\" (UID: \"9c1fec5d-77c6-42e3-8117-238b615456bb\") " Feb 27 16:41:13 crc kubenswrapper[4751]: I0227 16:41:13.067536 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c1fec5d-77c6-42e3-8117-238b615456bb-utilities" (OuterVolumeSpecName: "utilities") pod "9c1fec5d-77c6-42e3-8117-238b615456bb" (UID: "9c1fec5d-77c6-42e3-8117-238b615456bb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:41:13 crc kubenswrapper[4751]: I0227 16:41:13.072005 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c1fec5d-77c6-42e3-8117-238b615456bb-kube-api-access-4q4vz" (OuterVolumeSpecName: "kube-api-access-4q4vz") pod "9c1fec5d-77c6-42e3-8117-238b615456bb" (UID: "9c1fec5d-77c6-42e3-8117-238b615456bb"). InnerVolumeSpecName "kube-api-access-4q4vz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:41:13 crc kubenswrapper[4751]: I0227 16:41:13.147887 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c1fec5d-77c6-42e3-8117-238b615456bb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9c1fec5d-77c6-42e3-8117-238b615456bb" (UID: "9c1fec5d-77c6-42e3-8117-238b615456bb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:41:13 crc kubenswrapper[4751]: I0227 16:41:13.166912 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c1fec5d-77c6-42e3-8117-238b615456bb-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 16:41:13 crc kubenswrapper[4751]: I0227 16:41:13.166950 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4q4vz\" (UniqueName: \"kubernetes.io/projected/9c1fec5d-77c6-42e3-8117-238b615456bb-kube-api-access-4q4vz\") on node \"crc\" DevicePath \"\"" Feb 27 16:41:13 crc kubenswrapper[4751]: I0227 16:41:13.166962 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c1fec5d-77c6-42e3-8117-238b615456bb-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 16:41:13 crc kubenswrapper[4751]: I0227 16:41:13.273259 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l4bcd"] Feb 27 16:41:13 crc kubenswrapper[4751]: I0227 16:41:13.277544 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-l4bcd"] Feb 27 16:41:13 crc kubenswrapper[4751]: I0227 16:41:13.956793 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qzv64" event={"ID":"47c88bfe-cd2c-45ef-8c32-a5e79176903b","Type":"ContainerStarted","Data":"6e8ed48c383a139d00756bec5cea792c63b747c0aeaaa9c0d91dc4e8ca25df56"} Feb 27 16:41:13 crc kubenswrapper[4751]: I0227 16:41:13.985949 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qzv64" podStartSLOduration=4.333281267 podStartE2EDuration="16.985922044s" podCreationTimestamp="2026-02-27 16:40:57 +0000 UTC" firstStartedPulling="2026-02-27 16:41:00.863612748 +0000 UTC m=+1023.010627195" lastFinishedPulling="2026-02-27 16:41:13.516253515 +0000 UTC m=+1035.663267972" observedRunningTime="2026-02-27 16:41:13.978602789 +0000 UTC m=+1036.125617276" watchObservedRunningTime="2026-02-27 16:41:13.985922044 +0000 UTC m=+1036.132936501" Feb 27 16:41:14 crc kubenswrapper[4751]: I0227 16:41:14.527298 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c1fec5d-77c6-42e3-8117-238b615456bb" path="/var/lib/kubelet/pods/9c1fec5d-77c6-42e3-8117-238b615456bb/volumes" Feb 27 16:41:14 crc kubenswrapper[4751]: I0227 16:41:14.964118 4751 generic.go:334] "Generic (PLEG): container finished" podID="72be77d0-4306-4d1c-8397-d91b9f0a4bbb" containerID="b1a87aec558a13b7ba4583f84a45a1124fcd135587856da7e524aeadd8ac0c70" exitCode=0 Feb 27 16:41:14 crc kubenswrapper[4751]: I0227 16:41:14.964179 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wl6x8" event={"ID":"72be77d0-4306-4d1c-8397-d91b9f0a4bbb","Type":"ContainerDied","Data":"b1a87aec558a13b7ba4583f84a45a1124fcd135587856da7e524aeadd8ac0c70"} Feb 27 16:41:15 crc kubenswrapper[4751]: I0227 16:41:15.973583 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wl6x8" event={"ID":"72be77d0-4306-4d1c-8397-d91b9f0a4bbb","Type":"ContainerStarted","Data":"acc3419c47ec715078db13443e43c80ab0e34b1d865e7da32f991134102ebf85"} Feb 27 16:41:16 crc kubenswrapper[4751]: I0227 16:41:16.015422 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wl6x8" podStartSLOduration=7.47616924 
podStartE2EDuration="10.015381134s" podCreationTimestamp="2026-02-27 16:41:06 +0000 UTC" firstStartedPulling="2026-02-27 16:41:12.945272584 +0000 UTC m=+1035.092287031" lastFinishedPulling="2026-02-27 16:41:15.484484478 +0000 UTC m=+1037.631498925" observedRunningTime="2026-02-27 16:41:16.010759581 +0000 UTC m=+1038.157774028" watchObservedRunningTime="2026-02-27 16:41:16.015381134 +0000 UTC m=+1038.162395591" Feb 27 16:41:16 crc kubenswrapper[4751]: I0227 16:41:16.859971 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wl6x8" Feb 27 16:41:16 crc kubenswrapper[4751]: I0227 16:41:16.860020 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wl6x8" Feb 27 16:41:17 crc kubenswrapper[4751]: I0227 16:41:17.472216 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qzv64" Feb 27 16:41:17 crc kubenswrapper[4751]: I0227 16:41:17.472305 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qzv64" Feb 27 16:41:17 crc kubenswrapper[4751]: I0227 16:41:17.533840 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qzv64" Feb 27 16:41:17 crc kubenswrapper[4751]: I0227 16:41:17.900257 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-wl6x8" podUID="72be77d0-4306-4d1c-8397-d91b9f0a4bbb" containerName="registry-server" probeResult="failure" output=< Feb 27 16:41:17 crc kubenswrapper[4751]: timeout: failed to connect service ":50051" within 1s Feb 27 16:41:17 crc kubenswrapper[4751]: > Feb 27 16:41:26 crc kubenswrapper[4751]: I0227 16:41:26.930687 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wl6x8" Feb 27 16:41:26 crc kubenswrapper[4751]: I0227 16:41:26.983655 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wl6x8" Feb 27 16:41:27 crc kubenswrapper[4751]: I0227 16:41:27.166100 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wl6x8"] Feb 27 16:41:27 crc kubenswrapper[4751]: I0227 16:41:27.516170 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qzv64" Feb 27 16:41:28 crc kubenswrapper[4751]: I0227 16:41:28.053897 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wl6x8" podUID="72be77d0-4306-4d1c-8397-d91b9f0a4bbb" containerName="registry-server" containerID="cri-o://acc3419c47ec715078db13443e43c80ab0e34b1d865e7da32f991134102ebf85" gracePeriod=2 Feb 27 16:41:28 crc kubenswrapper[4751]: I0227 16:41:28.451678 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wl6x8" Feb 27 16:41:28 crc kubenswrapper[4751]: I0227 16:41:28.477648 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72be77d0-4306-4d1c-8397-d91b9f0a4bbb-catalog-content\") pod \"72be77d0-4306-4d1c-8397-d91b9f0a4bbb\" (UID: \"72be77d0-4306-4d1c-8397-d91b9f0a4bbb\") " Feb 27 16:41:28 crc kubenswrapper[4751]: I0227 16:41:28.477689 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72be77d0-4306-4d1c-8397-d91b9f0a4bbb-utilities\") pod \"72be77d0-4306-4d1c-8397-d91b9f0a4bbb\" (UID: \"72be77d0-4306-4d1c-8397-d91b9f0a4bbb\") " Feb 27 16:41:28 crc kubenswrapper[4751]: I0227 16:41:28.477773 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-spm56\" (UniqueName: \"kubernetes.io/projected/72be77d0-4306-4d1c-8397-d91b9f0a4bbb-kube-api-access-spm56\") pod \"72be77d0-4306-4d1c-8397-d91b9f0a4bbb\" (UID: \"72be77d0-4306-4d1c-8397-d91b9f0a4bbb\") " Feb 27 16:41:28 crc kubenswrapper[4751]: I0227 16:41:28.479110 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/72be77d0-4306-4d1c-8397-d91b9f0a4bbb-utilities" (OuterVolumeSpecName: "utilities") pod "72be77d0-4306-4d1c-8397-d91b9f0a4bbb" (UID: "72be77d0-4306-4d1c-8397-d91b9f0a4bbb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:41:28 crc kubenswrapper[4751]: I0227 16:41:28.482687 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72be77d0-4306-4d1c-8397-d91b9f0a4bbb-kube-api-access-spm56" (OuterVolumeSpecName: "kube-api-access-spm56") pod "72be77d0-4306-4d1c-8397-d91b9f0a4bbb" (UID: "72be77d0-4306-4d1c-8397-d91b9f0a4bbb"). InnerVolumeSpecName "kube-api-access-spm56". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:41:28 crc kubenswrapper[4751]: I0227 16:41:28.523702 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/72be77d0-4306-4d1c-8397-d91b9f0a4bbb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "72be77d0-4306-4d1c-8397-d91b9f0a4bbb" (UID: "72be77d0-4306-4d1c-8397-d91b9f0a4bbb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:41:28 crc kubenswrapper[4751]: I0227 16:41:28.580116 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72be77d0-4306-4d1c-8397-d91b9f0a4bbb-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 16:41:28 crc kubenswrapper[4751]: I0227 16:41:28.580162 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72be77d0-4306-4d1c-8397-d91b9f0a4bbb-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 16:41:28 crc kubenswrapper[4751]: I0227 16:41:28.580176 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-spm56\" (UniqueName: \"kubernetes.io/projected/72be77d0-4306-4d1c-8397-d91b9f0a4bbb-kube-api-access-spm56\") on node \"crc\" DevicePath \"\"" Feb 27 16:41:29 crc kubenswrapper[4751]: I0227 16:41:29.061783 4751 generic.go:334] "Generic (PLEG): container finished" podID="72be77d0-4306-4d1c-8397-d91b9f0a4bbb" containerID="acc3419c47ec715078db13443e43c80ab0e34b1d865e7da32f991134102ebf85" exitCode=0 Feb 27 16:41:29 crc kubenswrapper[4751]: I0227 16:41:29.061826 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wl6x8" event={"ID":"72be77d0-4306-4d1c-8397-d91b9f0a4bbb","Type":"ContainerDied","Data":"acc3419c47ec715078db13443e43c80ab0e34b1d865e7da32f991134102ebf85"} Feb 27 16:41:29 crc kubenswrapper[4751]: I0227 16:41:29.061886 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wl6x8" event={"ID":"72be77d0-4306-4d1c-8397-d91b9f0a4bbb","Type":"ContainerDied","Data":"ead8aed9228f0b5c4690089acb9ead8d7087a257fd1a1c0ca579524798230d0e"} Feb 27 16:41:29 crc kubenswrapper[4751]: I0227 16:41:29.061895 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wl6x8" Feb 27 16:41:29 crc kubenswrapper[4751]: I0227 16:41:29.061926 4751 scope.go:117] "RemoveContainer" containerID="acc3419c47ec715078db13443e43c80ab0e34b1d865e7da32f991134102ebf85" Feb 27 16:41:29 crc kubenswrapper[4751]: I0227 16:41:29.080147 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wl6x8"] Feb 27 16:41:29 crc kubenswrapper[4751]: I0227 16:41:29.083293 4751 scope.go:117] "RemoveContainer" containerID="b1a87aec558a13b7ba4583f84a45a1124fcd135587856da7e524aeadd8ac0c70" Feb 27 16:41:29 crc kubenswrapper[4751]: I0227 16:41:29.084737 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wl6x8"] Feb 27 16:41:29 crc kubenswrapper[4751]: I0227 16:41:29.104365 4751 scope.go:117] "RemoveContainer" containerID="e61283898e66123cab092c6337bd4ca47f6a95f31a525a888098e88e2f998c4b" Feb 27 16:41:29 crc kubenswrapper[4751]: I0227 16:41:29.138303 4751 scope.go:117] "RemoveContainer" containerID="acc3419c47ec715078db13443e43c80ab0e34b1d865e7da32f991134102ebf85" Feb 27 16:41:29 crc kubenswrapper[4751]: E0227 16:41:29.138769 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"acc3419c47ec715078db13443e43c80ab0e34b1d865e7da32f991134102ebf85\": container with ID starting with acc3419c47ec715078db13443e43c80ab0e34b1d865e7da32f991134102ebf85 not found: ID does not exist" containerID="acc3419c47ec715078db13443e43c80ab0e34b1d865e7da32f991134102ebf85" Feb 27 16:41:29 crc kubenswrapper[4751]: I0227 16:41:29.138799 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"acc3419c47ec715078db13443e43c80ab0e34b1d865e7da32f991134102ebf85"} err="failed to get container status \"acc3419c47ec715078db13443e43c80ab0e34b1d865e7da32f991134102ebf85\": rpc error: code = NotFound desc = could not find container \"acc3419c47ec715078db13443e43c80ab0e34b1d865e7da32f991134102ebf85\": container with ID starting with acc3419c47ec715078db13443e43c80ab0e34b1d865e7da32f991134102ebf85 not found: ID does not exist" Feb 27 16:41:29 crc kubenswrapper[4751]: I0227 16:41:29.138821 4751 scope.go:117] "RemoveContainer" containerID="b1a87aec558a13b7ba4583f84a45a1124fcd135587856da7e524aeadd8ac0c70" Feb 27 16:41:29 crc kubenswrapper[4751]: E0227 16:41:29.139033 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1a87aec558a13b7ba4583f84a45a1124fcd135587856da7e524aeadd8ac0c70\": container with ID starting with b1a87aec558a13b7ba4583f84a45a1124fcd135587856da7e524aeadd8ac0c70 not found: ID does not exist" containerID="b1a87aec558a13b7ba4583f84a45a1124fcd135587856da7e524aeadd8ac0c70" Feb 27 16:41:29 crc kubenswrapper[4751]: I0227 16:41:29.139067 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1a87aec558a13b7ba4583f84a45a1124fcd135587856da7e524aeadd8ac0c70"} err="failed to get container status \"b1a87aec558a13b7ba4583f84a45a1124fcd135587856da7e524aeadd8ac0c70\": rpc error: code = NotFound desc = could not find container \"b1a87aec558a13b7ba4583f84a45a1124fcd135587856da7e524aeadd8ac0c70\": container with ID starting with b1a87aec558a13b7ba4583f84a45a1124fcd135587856da7e524aeadd8ac0c70 not found: ID does not exist" Feb 27 16:41:29 crc kubenswrapper[4751]: I0227 16:41:29.139083 4751 scope.go:117] "RemoveContainer" 
containerID="e61283898e66123cab092c6337bd4ca47f6a95f31a525a888098e88e2f998c4b" Feb 27 16:41:29 crc kubenswrapper[4751]: E0227 16:41:29.139323 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e61283898e66123cab092c6337bd4ca47f6a95f31a525a888098e88e2f998c4b\": container with ID starting with e61283898e66123cab092c6337bd4ca47f6a95f31a525a888098e88e2f998c4b not found: ID does not exist" containerID="e61283898e66123cab092c6337bd4ca47f6a95f31a525a888098e88e2f998c4b" Feb 27 16:41:29 crc kubenswrapper[4751]: I0227 16:41:29.139346 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e61283898e66123cab092c6337bd4ca47f6a95f31a525a888098e88e2f998c4b"} err="failed to get container status \"e61283898e66123cab092c6337bd4ca47f6a95f31a525a888098e88e2f998c4b\": rpc error: code = NotFound desc = could not find container \"e61283898e66123cab092c6337bd4ca47f6a95f31a525a888098e88e2f998c4b\": container with ID starting with e61283898e66123cab092c6337bd4ca47f6a95f31a525a888098e88e2f998c4b not found: ID does not exist" Feb 27 16:41:29 crc kubenswrapper[4751]: I0227 16:41:29.777318 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qzv64"] Feb 27 16:41:29 crc kubenswrapper[4751]: I0227 16:41:29.777678 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qzv64" podUID="47c88bfe-cd2c-45ef-8c32-a5e79176903b" containerName="registry-server" containerID="cri-o://6e8ed48c383a139d00756bec5cea792c63b747c0aeaaa9c0d91dc4e8ca25df56" gracePeriod=2 Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.081216 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-b447877db-svmqs" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.085477 4751 generic.go:334] "Generic (PLEG): container finished" podID="47c88bfe-cd2c-45ef-8c32-a5e79176903b" containerID="6e8ed48c383a139d00756bec5cea792c63b747c0aeaaa9c0d91dc4e8ca25df56" exitCode=0 Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.085581 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qzv64" event={"ID":"47c88bfe-cd2c-45ef-8c32-a5e79176903b","Type":"ContainerDied","Data":"6e8ed48c383a139d00756bec5cea792c63b747c0aeaaa9c0d91dc4e8ca25df56"} Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.245291 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qzv64" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.303842 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47c88bfe-cd2c-45ef-8c32-a5e79176903b-utilities\") pod \"47c88bfe-cd2c-45ef-8c32-a5e79176903b\" (UID: \"47c88bfe-cd2c-45ef-8c32-a5e79176903b\") " Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.303938 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2b4mc\" (UniqueName: \"kubernetes.io/projected/47c88bfe-cd2c-45ef-8c32-a5e79176903b-kube-api-access-2b4mc\") pod \"47c88bfe-cd2c-45ef-8c32-a5e79176903b\" (UID: \"47c88bfe-cd2c-45ef-8c32-a5e79176903b\") " Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.303984 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47c88bfe-cd2c-45ef-8c32-a5e79176903b-catalog-content\") pod \"47c88bfe-cd2c-45ef-8c32-a5e79176903b\" (UID: \"47c88bfe-cd2c-45ef-8c32-a5e79176903b\") " Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.304782 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47c88bfe-cd2c-45ef-8c32-a5e79176903b-utilities" (OuterVolumeSpecName: "utilities") pod "47c88bfe-cd2c-45ef-8c32-a5e79176903b" (UID: "47c88bfe-cd2c-45ef-8c32-a5e79176903b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.308123 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47c88bfe-cd2c-45ef-8c32-a5e79176903b-kube-api-access-2b4mc" (OuterVolumeSpecName: "kube-api-access-2b4mc") pod "47c88bfe-cd2c-45ef-8c32-a5e79176903b" (UID: "47c88bfe-cd2c-45ef-8c32-a5e79176903b"). InnerVolumeSpecName "kube-api-access-2b4mc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.328408 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47c88bfe-cd2c-45ef-8c32-a5e79176903b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "47c88bfe-cd2c-45ef-8c32-a5e79176903b" (UID: "47c88bfe-cd2c-45ef-8c32-a5e79176903b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.405375 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47c88bfe-cd2c-45ef-8c32-a5e79176903b-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.405428 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47c88bfe-cd2c-45ef-8c32-a5e79176903b-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.405438 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2b4mc\" (UniqueName: \"kubernetes.io/projected/47c88bfe-cd2c-45ef-8c32-a5e79176903b-kube-api-access-2b4mc\") on node \"crc\" DevicePath \"\"" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.527040 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72be77d0-4306-4d1c-8397-d91b9f0a4bbb" path="/var/lib/kubelet/pods/72be77d0-4306-4d1c-8397-d91b9f0a4bbb/volumes" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.847641 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7f989f654f-pr9vm"] Feb 27 16:41:30 crc kubenswrapper[4751]: E0227 16:41:30.847889 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47c88bfe-cd2c-45ef-8c32-a5e79176903b" containerName="registry-server" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.847904 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="47c88bfe-cd2c-45ef-8c32-a5e79176903b" containerName="registry-server" Feb 27 16:41:30 crc kubenswrapper[4751]: E0227 16:41:30.847919 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72be77d0-4306-4d1c-8397-d91b9f0a4bbb" containerName="extract-content" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.847927 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="72be77d0-4306-4d1c-8397-d91b9f0a4bbb" containerName="extract-content" Feb 27 16:41:30 crc kubenswrapper[4751]: E0227 16:41:30.847941 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c1fec5d-77c6-42e3-8117-238b615456bb" containerName="registry-server" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.847949 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c1fec5d-77c6-42e3-8117-238b615456bb" containerName="registry-server" Feb 27 16:41:30 crc kubenswrapper[4751]: E0227 16:41:30.847958 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c1fec5d-77c6-42e3-8117-238b615456bb" containerName="extract-utilities" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.847967 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c1fec5d-77c6-42e3-8117-238b615456bb" containerName="extract-utilities" Feb 27 16:41:30 crc kubenswrapper[4751]: E0227 16:41:30.847980 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72be77d0-4306-4d1c-8397-d91b9f0a4bbb" containerName="registry-server" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.847987 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="72be77d0-4306-4d1c-8397-d91b9f0a4bbb" containerName="registry-server" Feb 27 16:41:30 crc kubenswrapper[4751]: E0227 16:41:30.847995 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47c88bfe-cd2c-45ef-8c32-a5e79176903b" containerName="extract-utilities" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.848003 
4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="47c88bfe-cd2c-45ef-8c32-a5e79176903b" containerName="extract-utilities" Feb 27 16:41:30 crc kubenswrapper[4751]: E0227 16:41:30.848013 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72be77d0-4306-4d1c-8397-d91b9f0a4bbb" containerName="extract-utilities" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.848021 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="72be77d0-4306-4d1c-8397-d91b9f0a4bbb" containerName="extract-utilities" Feb 27 16:41:30 crc kubenswrapper[4751]: E0227 16:41:30.848033 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c1fec5d-77c6-42e3-8117-238b615456bb" containerName="extract-content" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.848040 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c1fec5d-77c6-42e3-8117-238b615456bb" containerName="extract-content" Feb 27 16:41:30 crc kubenswrapper[4751]: E0227 16:41:30.848051 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47c88bfe-cd2c-45ef-8c32-a5e79176903b" containerName="extract-content" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.848058 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="47c88bfe-cd2c-45ef-8c32-a5e79176903b" containerName="extract-content" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.848174 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="47c88bfe-cd2c-45ef-8c32-a5e79176903b" containerName="registry-server" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.848190 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c1fec5d-77c6-42e3-8117-238b615456bb" containerName="registry-server" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.848206 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="72be77d0-4306-4d1c-8397-d91b9f0a4bbb" containerName="registry-server" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.848676 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7f989f654f-pr9vm" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.850839 4751 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-cd5qj" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.851773 4751 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.861343 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7f989f654f-pr9vm"] Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.870277 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-nqkcw"] Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.873027 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.874668 4751 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.874940 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.911434 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/51c1c1cc-8e81-4198-b580-3d511ed64669-reloader\") pod \"frr-k8s-nqkcw\" (UID: \"51c1c1cc-8e81-4198-b580-3d511ed64669\") " pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.911486 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/51c1c1cc-8e81-4198-b580-3d511ed64669-metrics\") pod \"frr-k8s-nqkcw\" (UID: \"51c1c1cc-8e81-4198-b580-3d511ed64669\") " pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.911580 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rcmnr\" (UniqueName: \"kubernetes.io/projected/5e236cf5-35f7-4af5-8b59-1ca8b8dde5d7-kube-api-access-rcmnr\") pod \"frr-k8s-webhook-server-7f989f654f-pr9vm\" (UID: \"5e236cf5-35f7-4af5-8b59-1ca8b8dde5d7\") " pod="metallb-system/frr-k8s-webhook-server-7f989f654f-pr9vm" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.911603 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/51c1c1cc-8e81-4198-b580-3d511ed64669-metrics-certs\") pod \"frr-k8s-nqkcw\" (UID: \"51c1c1cc-8e81-4198-b580-3d511ed64669\") " pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.911619 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/51c1c1cc-8e81-4198-b580-3d511ed64669-frr-sockets\") pod \"frr-k8s-nqkcw\" (UID: \"51c1c1cc-8e81-4198-b580-3d511ed64669\") " pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.911653 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5e236cf5-35f7-4af5-8b59-1ca8b8dde5d7-cert\") pod \"frr-k8s-webhook-server-7f989f654f-pr9vm\" (UID: \"5e236cf5-35f7-4af5-8b59-1ca8b8dde5d7\") " pod="metallb-system/frr-k8s-webhook-server-7f989f654f-pr9vm" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.911668 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/51c1c1cc-8e81-4198-b580-3d511ed64669-frr-startup\") pod \"frr-k8s-nqkcw\" (UID: \"51c1c1cc-8e81-4198-b580-3d511ed64669\") " pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.911752 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/51c1c1cc-8e81-4198-b580-3d511ed64669-frr-conf\") pod \"frr-k8s-nqkcw\" (UID: \"51c1c1cc-8e81-4198-b580-3d511ed64669\") " pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 
16:41:30.911804 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-chrjm\" (UniqueName: \"kubernetes.io/projected/51c1c1cc-8e81-4198-b580-3d511ed64669-kube-api-access-chrjm\") pod \"frr-k8s-nqkcw\" (UID: \"51c1c1cc-8e81-4198-b580-3d511ed64669\") " pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.950231 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-jr2bv"] Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.951389 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-jr2bv" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.952992 4751 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.953109 4751 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.953169 4751 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-g7h29" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.953169 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.968219 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-86ddb6bd46-z2rn9"] Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.969294 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-86ddb6bd46-z2rn9" Feb 27 16:41:30 crc kubenswrapper[4751]: I0227 16:41:30.973852 4751 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.008937 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-86ddb6bd46-z2rn9"] Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.014074 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5e236cf5-35f7-4af5-8b59-1ca8b8dde5d7-cert\") pod \"frr-k8s-webhook-server-7f989f654f-pr9vm\" (UID: \"5e236cf5-35f7-4af5-8b59-1ca8b8dde5d7\") " pod="metallb-system/frr-k8s-webhook-server-7f989f654f-pr9vm" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.014130 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/51c1c1cc-8e81-4198-b580-3d511ed64669-frr-startup\") pod \"frr-k8s-nqkcw\" (UID: \"51c1c1cc-8e81-4198-b580-3d511ed64669\") " pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.014160 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-chrjm\" (UniqueName: \"kubernetes.io/projected/51c1c1cc-8e81-4198-b580-3d511ed64669-kube-api-access-chrjm\") pod \"frr-k8s-nqkcw\" (UID: \"51c1c1cc-8e81-4198-b580-3d511ed64669\") " pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.014182 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/51c1c1cc-8e81-4198-b580-3d511ed64669-frr-conf\") pod \"frr-k8s-nqkcw\" (UID: \"51c1c1cc-8e81-4198-b580-3d511ed64669\") " pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:31 crc 
kubenswrapper[4751]: I0227 16:41:31.014248 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/51c1c1cc-8e81-4198-b580-3d511ed64669-reloader\") pod \"frr-k8s-nqkcw\" (UID: \"51c1c1cc-8e81-4198-b580-3d511ed64669\") " pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.014269 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/51c1c1cc-8e81-4198-b580-3d511ed64669-metrics\") pod \"frr-k8s-nqkcw\" (UID: \"51c1c1cc-8e81-4198-b580-3d511ed64669\") " pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.014332 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rcmnr\" (UniqueName: \"kubernetes.io/projected/5e236cf5-35f7-4af5-8b59-1ca8b8dde5d7-kube-api-access-rcmnr\") pod \"frr-k8s-webhook-server-7f989f654f-pr9vm\" (UID: \"5e236cf5-35f7-4af5-8b59-1ca8b8dde5d7\") " pod="metallb-system/frr-k8s-webhook-server-7f989f654f-pr9vm" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.014359 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/51c1c1cc-8e81-4198-b580-3d511ed64669-metrics-certs\") pod \"frr-k8s-nqkcw\" (UID: \"51c1c1cc-8e81-4198-b580-3d511ed64669\") " pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.014380 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/51c1c1cc-8e81-4198-b580-3d511ed64669-frr-sockets\") pod \"frr-k8s-nqkcw\" (UID: \"51c1c1cc-8e81-4198-b580-3d511ed64669\") " pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.015198 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/51c1c1cc-8e81-4198-b580-3d511ed64669-frr-sockets\") pod \"frr-k8s-nqkcw\" (UID: \"51c1c1cc-8e81-4198-b580-3d511ed64669\") " pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.015839 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/51c1c1cc-8e81-4198-b580-3d511ed64669-frr-conf\") pod \"frr-k8s-nqkcw\" (UID: \"51c1c1cc-8e81-4198-b580-3d511ed64669\") " pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:31 crc kubenswrapper[4751]: E0227 16:41:31.016211 4751 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Feb 27 16:41:31 crc kubenswrapper[4751]: E0227 16:41:31.016273 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/51c1c1cc-8e81-4198-b580-3d511ed64669-metrics-certs podName:51c1c1cc-8e81-4198-b580-3d511ed64669 nodeName:}" failed. No retries permitted until 2026-02-27 16:41:31.516256201 +0000 UTC m=+1053.663270648 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/51c1c1cc-8e81-4198-b580-3d511ed64669-metrics-certs") pod "frr-k8s-nqkcw" (UID: "51c1c1cc-8e81-4198-b580-3d511ed64669") : secret "frr-k8s-certs-secret" not found Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.020305 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/51c1c1cc-8e81-4198-b580-3d511ed64669-reloader\") pod \"frr-k8s-nqkcw\" (UID: \"51c1c1cc-8e81-4198-b580-3d511ed64669\") " pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.020818 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/51c1c1cc-8e81-4198-b580-3d511ed64669-metrics\") pod \"frr-k8s-nqkcw\" (UID: \"51c1c1cc-8e81-4198-b580-3d511ed64669\") " pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.024371 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5e236cf5-35f7-4af5-8b59-1ca8b8dde5d7-cert\") pod \"frr-k8s-webhook-server-7f989f654f-pr9vm\" (UID: \"5e236cf5-35f7-4af5-8b59-1ca8b8dde5d7\") " pod="metallb-system/frr-k8s-webhook-server-7f989f654f-pr9vm" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.024818 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/51c1c1cc-8e81-4198-b580-3d511ed64669-frr-startup\") pod \"frr-k8s-nqkcw\" (UID: \"51c1c1cc-8e81-4198-b580-3d511ed64669\") " pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.044197 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rcmnr\" (UniqueName: \"kubernetes.io/projected/5e236cf5-35f7-4af5-8b59-1ca8b8dde5d7-kube-api-access-rcmnr\") pod \"frr-k8s-webhook-server-7f989f654f-pr9vm\" (UID: \"5e236cf5-35f7-4af5-8b59-1ca8b8dde5d7\") " pod="metallb-system/frr-k8s-webhook-server-7f989f654f-pr9vm" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.049693 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-chrjm\" (UniqueName: \"kubernetes.io/projected/51c1c1cc-8e81-4198-b580-3d511ed64669-kube-api-access-chrjm\") pod \"frr-k8s-nqkcw\" (UID: \"51c1c1cc-8e81-4198-b580-3d511ed64669\") " pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.095416 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qzv64" event={"ID":"47c88bfe-cd2c-45ef-8c32-a5e79176903b","Type":"ContainerDied","Data":"8e99e4eef5fc01cf45db3e7bff28efa3cf2417991bc3fdeb1b7cd6952ff4da6d"} Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.095815 4751 scope.go:117] "RemoveContainer" containerID="6e8ed48c383a139d00756bec5cea792c63b747c0aeaaa9c0d91dc4e8ca25df56" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.095482 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qzv64" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.116080 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/620b652a-8111-4c0a-86b0-d692bb768d8b-memberlist\") pod \"speaker-jr2bv\" (UID: \"620b652a-8111-4c0a-86b0-d692bb768d8b\") " pod="metallb-system/speaker-jr2bv" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.116321 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d5d4586f-9e6d-471f-a07c-96e142df13ec-cert\") pod \"controller-86ddb6bd46-z2rn9\" (UID: \"d5d4586f-9e6d-471f-a07c-96e142df13ec\") " pod="metallb-system/controller-86ddb6bd46-z2rn9" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.116472 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d5d4586f-9e6d-471f-a07c-96e142df13ec-metrics-certs\") pod \"controller-86ddb6bd46-z2rn9\" (UID: \"d5d4586f-9e6d-471f-a07c-96e142df13ec\") " pod="metallb-system/controller-86ddb6bd46-z2rn9" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.116599 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4sdzf\" (UniqueName: \"kubernetes.io/projected/d5d4586f-9e6d-471f-a07c-96e142df13ec-kube-api-access-4sdzf\") pod \"controller-86ddb6bd46-z2rn9\" (UID: \"d5d4586f-9e6d-471f-a07c-96e142df13ec\") " pod="metallb-system/controller-86ddb6bd46-z2rn9" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.117251 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/620b652a-8111-4c0a-86b0-d692bb768d8b-metrics-certs\") pod \"speaker-jr2bv\" (UID: \"620b652a-8111-4c0a-86b0-d692bb768d8b\") " pod="metallb-system/speaker-jr2bv" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.117343 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/620b652a-8111-4c0a-86b0-d692bb768d8b-metallb-excludel2\") pod \"speaker-jr2bv\" (UID: \"620b652a-8111-4c0a-86b0-d692bb768d8b\") " pod="metallb-system/speaker-jr2bv" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.117520 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpkcw\" (UniqueName: \"kubernetes.io/projected/620b652a-8111-4c0a-86b0-d692bb768d8b-kube-api-access-wpkcw\") pod \"speaker-jr2bv\" (UID: \"620b652a-8111-4c0a-86b0-d692bb768d8b\") " pod="metallb-system/speaker-jr2bv" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.124581 4751 scope.go:117] "RemoveContainer" containerID="8ec80888324b7d1de51b67d7122719692db1c731c9bc3b99823cf321f313fee5" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.124977 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qzv64"] Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.129330 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qzv64"] Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.143040 4751 scope.go:117] "RemoveContainer" containerID="a9af31915d14db934b66ef2c3f7e6278e23e624b4f2995d40425f416d3d0383a" Feb 27 16:41:31 crc 
kubenswrapper[4751]: I0227 16:41:31.163899 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7f989f654f-pr9vm" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.218292 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/620b652a-8111-4c0a-86b0-d692bb768d8b-metrics-certs\") pod \"speaker-jr2bv\" (UID: \"620b652a-8111-4c0a-86b0-d692bb768d8b\") " pod="metallb-system/speaker-jr2bv" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.218327 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/620b652a-8111-4c0a-86b0-d692bb768d8b-metallb-excludel2\") pod \"speaker-jr2bv\" (UID: \"620b652a-8111-4c0a-86b0-d692bb768d8b\") " pod="metallb-system/speaker-jr2bv" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.218378 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpkcw\" (UniqueName: \"kubernetes.io/projected/620b652a-8111-4c0a-86b0-d692bb768d8b-kube-api-access-wpkcw\") pod \"speaker-jr2bv\" (UID: \"620b652a-8111-4c0a-86b0-d692bb768d8b\") " pod="metallb-system/speaker-jr2bv" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.218415 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/620b652a-8111-4c0a-86b0-d692bb768d8b-memberlist\") pod \"speaker-jr2bv\" (UID: \"620b652a-8111-4c0a-86b0-d692bb768d8b\") " pod="metallb-system/speaker-jr2bv" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.218431 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d5d4586f-9e6d-471f-a07c-96e142df13ec-cert\") pod \"controller-86ddb6bd46-z2rn9\" (UID: \"d5d4586f-9e6d-471f-a07c-96e142df13ec\") " pod="metallb-system/controller-86ddb6bd46-z2rn9" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.218484 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d5d4586f-9e6d-471f-a07c-96e142df13ec-metrics-certs\") pod \"controller-86ddb6bd46-z2rn9\" (UID: \"d5d4586f-9e6d-471f-a07c-96e142df13ec\") " pod="metallb-system/controller-86ddb6bd46-z2rn9" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.218504 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4sdzf\" (UniqueName: \"kubernetes.io/projected/d5d4586f-9e6d-471f-a07c-96e142df13ec-kube-api-access-4sdzf\") pod \"controller-86ddb6bd46-z2rn9\" (UID: \"d5d4586f-9e6d-471f-a07c-96e142df13ec\") " pod="metallb-system/controller-86ddb6bd46-z2rn9" Feb 27 16:41:31 crc kubenswrapper[4751]: E0227 16:41:31.218824 4751 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Feb 27 16:41:31 crc kubenswrapper[4751]: E0227 16:41:31.218874 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d5d4586f-9e6d-471f-a07c-96e142df13ec-metrics-certs podName:d5d4586f-9e6d-471f-a07c-96e142df13ec nodeName:}" failed. No retries permitted until 2026-02-27 16:41:31.718858224 +0000 UTC m=+1053.865872671 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/d5d4586f-9e6d-471f-a07c-96e142df13ec-metrics-certs") pod "controller-86ddb6bd46-z2rn9" (UID: "d5d4586f-9e6d-471f-a07c-96e142df13ec") : secret "controller-certs-secret" not found Feb 27 16:41:31 crc kubenswrapper[4751]: E0227 16:41:31.218878 4751 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Feb 27 16:41:31 crc kubenswrapper[4751]: E0227 16:41:31.218916 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/620b652a-8111-4c0a-86b0-d692bb768d8b-memberlist podName:620b652a-8111-4c0a-86b0-d692bb768d8b nodeName:}" failed. No retries permitted until 2026-02-27 16:41:31.718904726 +0000 UTC m=+1053.865919183 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/620b652a-8111-4c0a-86b0-d692bb768d8b-memberlist") pod "speaker-jr2bv" (UID: "620b652a-8111-4c0a-86b0-d692bb768d8b") : secret "metallb-memberlist" not found Feb 27 16:41:31 crc kubenswrapper[4751]: E0227 16:41:31.219040 4751 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Feb 27 16:41:31 crc kubenswrapper[4751]: E0227 16:41:31.219116 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/620b652a-8111-4c0a-86b0-d692bb768d8b-metrics-certs podName:620b652a-8111-4c0a-86b0-d692bb768d8b nodeName:}" failed. No retries permitted until 2026-02-27 16:41:31.719100271 +0000 UTC m=+1053.866114718 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/620b652a-8111-4c0a-86b0-d692bb768d8b-metrics-certs") pod "speaker-jr2bv" (UID: "620b652a-8111-4c0a-86b0-d692bb768d8b") : secret "speaker-certs-secret" not found Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.222111 4751 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.223156 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/620b652a-8111-4c0a-86b0-d692bb768d8b-metallb-excludel2\") pod \"speaker-jr2bv\" (UID: \"620b652a-8111-4c0a-86b0-d692bb768d8b\") " pod="metallb-system/speaker-jr2bv" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.235285 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4sdzf\" (UniqueName: \"kubernetes.io/projected/d5d4586f-9e6d-471f-a07c-96e142df13ec-kube-api-access-4sdzf\") pod \"controller-86ddb6bd46-z2rn9\" (UID: \"d5d4586f-9e6d-471f-a07c-96e142df13ec\") " pod="metallb-system/controller-86ddb6bd46-z2rn9" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.236471 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d5d4586f-9e6d-471f-a07c-96e142df13ec-cert\") pod \"controller-86ddb6bd46-z2rn9\" (UID: \"d5d4586f-9e6d-471f-a07c-96e142df13ec\") " pod="metallb-system/controller-86ddb6bd46-z2rn9" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.240849 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wpkcw\" (UniqueName: \"kubernetes.io/projected/620b652a-8111-4c0a-86b0-d692bb768d8b-kube-api-access-wpkcw\") pod \"speaker-jr2bv\" (UID: \"620b652a-8111-4c0a-86b0-d692bb768d8b\") " pod="metallb-system/speaker-jr2bv" Feb 27 
16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.394011 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7f989f654f-pr9vm"] Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.521917 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/51c1c1cc-8e81-4198-b580-3d511ed64669-metrics-certs\") pod \"frr-k8s-nqkcw\" (UID: \"51c1c1cc-8e81-4198-b580-3d511ed64669\") " pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.525838 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/51c1c1cc-8e81-4198-b580-3d511ed64669-metrics-certs\") pod \"frr-k8s-nqkcw\" (UID: \"51c1c1cc-8e81-4198-b580-3d511ed64669\") " pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.724803 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d5d4586f-9e6d-471f-a07c-96e142df13ec-metrics-certs\") pod \"controller-86ddb6bd46-z2rn9\" (UID: \"d5d4586f-9e6d-471f-a07c-96e142df13ec\") " pod="metallb-system/controller-86ddb6bd46-z2rn9" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.725205 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/620b652a-8111-4c0a-86b0-d692bb768d8b-metrics-certs\") pod \"speaker-jr2bv\" (UID: \"620b652a-8111-4c0a-86b0-d692bb768d8b\") " pod="metallb-system/speaker-jr2bv" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.725281 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/620b652a-8111-4c0a-86b0-d692bb768d8b-memberlist\") pod \"speaker-jr2bv\" (UID: \"620b652a-8111-4c0a-86b0-d692bb768d8b\") " pod="metallb-system/speaker-jr2bv" Feb 27 16:41:31 crc kubenswrapper[4751]: E0227 16:41:31.725380 4751 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Feb 27 16:41:31 crc kubenswrapper[4751]: E0227 16:41:31.725444 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/620b652a-8111-4c0a-86b0-d692bb768d8b-memberlist podName:620b652a-8111-4c0a-86b0-d692bb768d8b nodeName:}" failed. No retries permitted until 2026-02-27 16:41:32.725429924 +0000 UTC m=+1054.872444371 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/620b652a-8111-4c0a-86b0-d692bb768d8b-memberlist") pod "speaker-jr2bv" (UID: "620b652a-8111-4c0a-86b0-d692bb768d8b") : secret "metallb-memberlist" not found Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.728367 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/620b652a-8111-4c0a-86b0-d692bb768d8b-metrics-certs\") pod \"speaker-jr2bv\" (UID: \"620b652a-8111-4c0a-86b0-d692bb768d8b\") " pod="metallb-system/speaker-jr2bv" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.728959 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d5d4586f-9e6d-471f-a07c-96e142df13ec-metrics-certs\") pod \"controller-86ddb6bd46-z2rn9\" (UID: \"d5d4586f-9e6d-471f-a07c-96e142df13ec\") " pod="metallb-system/controller-86ddb6bd46-z2rn9" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.796258 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:31 crc kubenswrapper[4751]: I0227 16:41:31.883453 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-86ddb6bd46-z2rn9" Feb 27 16:41:32 crc kubenswrapper[4751]: I0227 16:41:32.062261 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-86ddb6bd46-z2rn9"] Feb 27 16:41:32 crc kubenswrapper[4751]: W0227 16:41:32.071483 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd5d4586f_9e6d_471f_a07c_96e142df13ec.slice/crio-581373842f3ba2584654fead4d25586645ffb0355652325944c98e834c1d7c1b WatchSource:0}: Error finding container 581373842f3ba2584654fead4d25586645ffb0355652325944c98e834c1d7c1b: Status 404 returned error can't find the container with id 581373842f3ba2584654fead4d25586645ffb0355652325944c98e834c1d7c1b Feb 27 16:41:32 crc kubenswrapper[4751]: I0227 16:41:32.103599 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7f989f654f-pr9vm" event={"ID":"5e236cf5-35f7-4af5-8b59-1ca8b8dde5d7","Type":"ContainerStarted","Data":"e6114c48a4435efb0df4f4f780b3f240a99f40f40fdca5d332bff4b6027f68f9"} Feb 27 16:41:32 crc kubenswrapper[4751]: I0227 16:41:32.104520 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-86ddb6bd46-z2rn9" event={"ID":"d5d4586f-9e6d-471f-a07c-96e142df13ec","Type":"ContainerStarted","Data":"581373842f3ba2584654fead4d25586645ffb0355652325944c98e834c1d7c1b"} Feb 27 16:41:32 crc kubenswrapper[4751]: I0227 16:41:32.105222 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-nqkcw" event={"ID":"51c1c1cc-8e81-4198-b580-3d511ed64669","Type":"ContainerStarted","Data":"8457d51c6b27ce2173953356decaddeb72f8c764d570737db603797bcda659a9"} Feb 27 16:41:32 crc kubenswrapper[4751]: I0227 16:41:32.529923 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47c88bfe-cd2c-45ef-8c32-a5e79176903b" path="/var/lib/kubelet/pods/47c88bfe-cd2c-45ef-8c32-a5e79176903b/volumes" Feb 27 16:41:32 crc kubenswrapper[4751]: I0227 16:41:32.738941 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/620b652a-8111-4c0a-86b0-d692bb768d8b-memberlist\") pod \"speaker-jr2bv\" (UID: 
\"620b652a-8111-4c0a-86b0-d692bb768d8b\") " pod="metallb-system/speaker-jr2bv" Feb 27 16:41:32 crc kubenswrapper[4751]: I0227 16:41:32.744108 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/620b652a-8111-4c0a-86b0-d692bb768d8b-memberlist\") pod \"speaker-jr2bv\" (UID: \"620b652a-8111-4c0a-86b0-d692bb768d8b\") " pod="metallb-system/speaker-jr2bv" Feb 27 16:41:32 crc kubenswrapper[4751]: I0227 16:41:32.768775 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-jr2bv" Feb 27 16:41:33 crc kubenswrapper[4751]: I0227 16:41:33.115714 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-86ddb6bd46-z2rn9" event={"ID":"d5d4586f-9e6d-471f-a07c-96e142df13ec","Type":"ContainerStarted","Data":"06ddf2f5839130ef0de8dac29eb6ec4a7a0599ca5d929e228091d787e5d6f924"} Feb 27 16:41:33 crc kubenswrapper[4751]: I0227 16:41:33.115781 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-86ddb6bd46-z2rn9" event={"ID":"d5d4586f-9e6d-471f-a07c-96e142df13ec","Type":"ContainerStarted","Data":"b68a75e78cb7ece80928f71b06ec20380774615596a61c267274b2ecfb4c7ca3"} Feb 27 16:41:33 crc kubenswrapper[4751]: I0227 16:41:33.115841 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-86ddb6bd46-z2rn9" Feb 27 16:41:33 crc kubenswrapper[4751]: I0227 16:41:33.120694 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-jr2bv" event={"ID":"620b652a-8111-4c0a-86b0-d692bb768d8b","Type":"ContainerStarted","Data":"3b94de9a0c920c0eeb9333a3ceb770edd6a8d6a81d19ad44350459d2f210d061"} Feb 27 16:41:33 crc kubenswrapper[4751]: I0227 16:41:33.142565 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-86ddb6bd46-z2rn9" podStartSLOduration=3.142544004 podStartE2EDuration="3.142544004s" podCreationTimestamp="2026-02-27 16:41:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:41:33.138781104 +0000 UTC m=+1055.285795561" watchObservedRunningTime="2026-02-27 16:41:33.142544004 +0000 UTC m=+1055.289558451" Feb 27 16:41:34 crc kubenswrapper[4751]: I0227 16:41:34.131573 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-jr2bv" event={"ID":"620b652a-8111-4c0a-86b0-d692bb768d8b","Type":"ContainerStarted","Data":"21e22e039c9c5db66826527250ca56b6d97c51c47a220b036d731fccfd07f3df"} Feb 27 16:41:34 crc kubenswrapper[4751]: I0227 16:41:34.131931 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-jr2bv" event={"ID":"620b652a-8111-4c0a-86b0-d692bb768d8b","Type":"ContainerStarted","Data":"dd047c80d9a49747d56e8fe7c55cfbb179024fe1ee16c8792cd5f501bba9d803"} Feb 27 16:41:34 crc kubenswrapper[4751]: I0227 16:41:34.131963 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-jr2bv" Feb 27 16:41:34 crc kubenswrapper[4751]: I0227 16:41:34.148276 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-jr2bv" podStartSLOduration=4.148257515 podStartE2EDuration="4.148257515s" podCreationTimestamp="2026-02-27 16:41:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:41:34.145525963 +0000 UTC m=+1056.292540410" 
watchObservedRunningTime="2026-02-27 16:41:34.148257515 +0000 UTC m=+1056.295271962" Feb 27 16:41:39 crc kubenswrapper[4751]: I0227 16:41:39.162646 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7f989f654f-pr9vm" event={"ID":"5e236cf5-35f7-4af5-8b59-1ca8b8dde5d7","Type":"ContainerStarted","Data":"fe628971fc86238e0c58d5e2a9b730eb3d32a987dae792c4860db58c566bdd25"} Feb 27 16:41:39 crc kubenswrapper[4751]: I0227 16:41:39.162737 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7f989f654f-pr9vm" Feb 27 16:41:39 crc kubenswrapper[4751]: I0227 16:41:39.165356 4751 generic.go:334] "Generic (PLEG): container finished" podID="51c1c1cc-8e81-4198-b580-3d511ed64669" containerID="4415e04abb15d63db7e3b8ef35b562156464a78f22bd02c3002e6c3468683360" exitCode=0 Feb 27 16:41:39 crc kubenswrapper[4751]: I0227 16:41:39.165385 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-nqkcw" event={"ID":"51c1c1cc-8e81-4198-b580-3d511ed64669","Type":"ContainerDied","Data":"4415e04abb15d63db7e3b8ef35b562156464a78f22bd02c3002e6c3468683360"} Feb 27 16:41:39 crc kubenswrapper[4751]: I0227 16:41:39.196144 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7f989f654f-pr9vm" podStartSLOduration=2.270171286 podStartE2EDuration="9.196120342s" podCreationTimestamp="2026-02-27 16:41:30 +0000 UTC" firstStartedPulling="2026-02-27 16:41:31.404341772 +0000 UTC m=+1053.551356219" lastFinishedPulling="2026-02-27 16:41:38.330290818 +0000 UTC m=+1060.477305275" observedRunningTime="2026-02-27 16:41:39.183301162 +0000 UTC m=+1061.330315619" watchObservedRunningTime="2026-02-27 16:41:39.196120342 +0000 UTC m=+1061.343134809" Feb 27 16:41:40 crc kubenswrapper[4751]: I0227 16:41:40.173942 4751 generic.go:334] "Generic (PLEG): container finished" podID="51c1c1cc-8e81-4198-b580-3d511ed64669" containerID="2fb60e5ce2bb820a49bb8c1fe63c0111f8278f0d86f642474addaa055d4d691d" exitCode=0 Feb 27 16:41:40 crc kubenswrapper[4751]: I0227 16:41:40.174014 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-nqkcw" event={"ID":"51c1c1cc-8e81-4198-b580-3d511ed64669","Type":"ContainerDied","Data":"2fb60e5ce2bb820a49bb8c1fe63c0111f8278f0d86f642474addaa055d4d691d"} Feb 27 16:41:41 crc kubenswrapper[4751]: I0227 16:41:41.181649 4751 generic.go:334] "Generic (PLEG): container finished" podID="51c1c1cc-8e81-4198-b580-3d511ed64669" containerID="1bb0c41287cebe303993f17fce13975b345948be6b88e95552425aafc2aaf546" exitCode=0 Feb 27 16:41:41 crc kubenswrapper[4751]: I0227 16:41:41.181702 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-nqkcw" event={"ID":"51c1c1cc-8e81-4198-b580-3d511ed64669","Type":"ContainerDied","Data":"1bb0c41287cebe303993f17fce13975b345948be6b88e95552425aafc2aaf546"} Feb 27 16:41:42 crc kubenswrapper[4751]: I0227 16:41:42.193132 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-nqkcw" event={"ID":"51c1c1cc-8e81-4198-b580-3d511ed64669","Type":"ContainerStarted","Data":"dcdfdac985101e2155177204a2f25b6db791c8c2e2eb20bae86366aa7528de74"} Feb 27 16:41:42 crc kubenswrapper[4751]: I0227 16:41:42.193530 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-nqkcw" event={"ID":"51c1c1cc-8e81-4198-b580-3d511ed64669","Type":"ContainerStarted","Data":"2d2648af7f922877669b0ef97036f0264591f0bcb5e6c1662362d98c2c36ef54"} Feb 27 16:41:42 crc 
kubenswrapper[4751]: I0227 16:41:42.193545 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-nqkcw" event={"ID":"51c1c1cc-8e81-4198-b580-3d511ed64669","Type":"ContainerStarted","Data":"975800abb6e6747d428c96a5a9bf685e14c65d0e273464563bbb3c35dcabcefe"} Feb 27 16:41:42 crc kubenswrapper[4751]: I0227 16:41:42.193559 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-nqkcw" event={"ID":"51c1c1cc-8e81-4198-b580-3d511ed64669","Type":"ContainerStarted","Data":"84669ba2e529583f0d4419decc1e96e1297e89498cac287964d47da1d866ac88"} Feb 27 16:41:43 crc kubenswrapper[4751]: I0227 16:41:43.203678 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-nqkcw" event={"ID":"51c1c1cc-8e81-4198-b580-3d511ed64669","Type":"ContainerStarted","Data":"9d428e7617bd5a8f683276927c45d48dfcb7e31d427352b973f6ab35e0619a16"} Feb 27 16:41:44 crc kubenswrapper[4751]: I0227 16:41:44.215938 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-nqkcw" event={"ID":"51c1c1cc-8e81-4198-b580-3d511ed64669","Type":"ContainerStarted","Data":"8324fd0f3a22bd2018dcddde98bc35a6c96ea482d51cedf230e4f5d4b89ae8f1"} Feb 27 16:41:44 crc kubenswrapper[4751]: I0227 16:41:44.216270 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:44 crc kubenswrapper[4751]: I0227 16:41:44.241757 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-nqkcw" podStartSLOduration=7.771854441 podStartE2EDuration="14.241734959s" podCreationTimestamp="2026-02-27 16:41:30 +0000 UTC" firstStartedPulling="2026-02-27 16:41:31.896636422 +0000 UTC m=+1054.043650889" lastFinishedPulling="2026-02-27 16:41:38.36651696 +0000 UTC m=+1060.513531407" observedRunningTime="2026-02-27 16:41:44.241573154 +0000 UTC m=+1066.388587631" watchObservedRunningTime="2026-02-27 16:41:44.241734959 +0000 UTC m=+1066.388749406" Feb 27 16:41:46 crc kubenswrapper[4751]: I0227 16:41:46.796826 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:46 crc kubenswrapper[4751]: I0227 16:41:46.833095 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:51 crc kubenswrapper[4751]: I0227 16:41:51.171743 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7f989f654f-pr9vm" Feb 27 16:41:51 crc kubenswrapper[4751]: I0227 16:41:51.806911 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-nqkcw" Feb 27 16:41:51 crc kubenswrapper[4751]: I0227 16:41:51.887496 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-86ddb6bd46-z2rn9" Feb 27 16:41:52 crc kubenswrapper[4751]: I0227 16:41:52.776139 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-jr2bv" Feb 27 16:41:54 crc kubenswrapper[4751]: I0227 16:41:54.468523 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm"] Feb 27 16:41:54 crc kubenswrapper[4751]: I0227 16:41:54.469721 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm" Feb 27 16:41:54 crc kubenswrapper[4751]: I0227 16:41:54.472190 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Feb 27 16:41:54 crc kubenswrapper[4751]: I0227 16:41:54.479233 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm"] Feb 27 16:41:54 crc kubenswrapper[4751]: I0227 16:41:54.571595 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b9d9ee08-5a13-4f98-916d-b9b330f8963c-bundle\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm\" (UID: \"b9d9ee08-5a13-4f98-916d-b9b330f8963c\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm" Feb 27 16:41:54 crc kubenswrapper[4751]: I0227 16:41:54.571656 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b9d9ee08-5a13-4f98-916d-b9b330f8963c-util\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm\" (UID: \"b9d9ee08-5a13-4f98-916d-b9b330f8963c\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm" Feb 27 16:41:54 crc kubenswrapper[4751]: I0227 16:41:54.571779 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jt9p\" (UniqueName: \"kubernetes.io/projected/b9d9ee08-5a13-4f98-916d-b9b330f8963c-kube-api-access-2jt9p\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm\" (UID: \"b9d9ee08-5a13-4f98-916d-b9b330f8963c\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm" Feb 27 16:41:54 crc kubenswrapper[4751]: I0227 16:41:54.673043 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b9d9ee08-5a13-4f98-916d-b9b330f8963c-util\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm\" (UID: \"b9d9ee08-5a13-4f98-916d-b9b330f8963c\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm" Feb 27 16:41:54 crc kubenswrapper[4751]: I0227 16:41:54.673137 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jt9p\" (UniqueName: \"kubernetes.io/projected/b9d9ee08-5a13-4f98-916d-b9b330f8963c-kube-api-access-2jt9p\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm\" (UID: \"b9d9ee08-5a13-4f98-916d-b9b330f8963c\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm" Feb 27 16:41:54 crc kubenswrapper[4751]: I0227 16:41:54.673168 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b9d9ee08-5a13-4f98-916d-b9b330f8963c-bundle\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm\" (UID: \"b9d9ee08-5a13-4f98-916d-b9b330f8963c\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm" Feb 27 16:41:54 crc kubenswrapper[4751]: I0227 16:41:54.673776 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/b9d9ee08-5a13-4f98-916d-b9b330f8963c-util\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm\" (UID: \"b9d9ee08-5a13-4f98-916d-b9b330f8963c\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm" Feb 27 16:41:54 crc kubenswrapper[4751]: I0227 16:41:54.674031 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b9d9ee08-5a13-4f98-916d-b9b330f8963c-bundle\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm\" (UID: \"b9d9ee08-5a13-4f98-916d-b9b330f8963c\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm" Feb 27 16:41:54 crc kubenswrapper[4751]: I0227 16:41:54.693033 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jt9p\" (UniqueName: \"kubernetes.io/projected/b9d9ee08-5a13-4f98-916d-b9b330f8963c-kube-api-access-2jt9p\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm\" (UID: \"b9d9ee08-5a13-4f98-916d-b9b330f8963c\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm" Feb 27 16:41:54 crc kubenswrapper[4751]: I0227 16:41:54.833024 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm" Feb 27 16:41:55 crc kubenswrapper[4751]: I0227 16:41:55.042457 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm"] Feb 27 16:41:55 crc kubenswrapper[4751]: I0227 16:41:55.308143 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm" event={"ID":"b9d9ee08-5a13-4f98-916d-b9b330f8963c","Type":"ContainerStarted","Data":"870ae1cf18dec446af7a2178149b416a54aaa9d970a0c6381fd30fd89ad72ae2"} Feb 27 16:41:55 crc kubenswrapper[4751]: I0227 16:41:55.308191 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm" event={"ID":"b9d9ee08-5a13-4f98-916d-b9b330f8963c","Type":"ContainerStarted","Data":"dc87ff071023b56fed5c6c4015b0ab6c5bbe5c9bf7602346d2a06ab5070c5389"} Feb 27 16:41:56 crc kubenswrapper[4751]: I0227 16:41:56.317901 4751 generic.go:334] "Generic (PLEG): container finished" podID="b9d9ee08-5a13-4f98-916d-b9b330f8963c" containerID="870ae1cf18dec446af7a2178149b416a54aaa9d970a0c6381fd30fd89ad72ae2" exitCode=0 Feb 27 16:41:56 crc kubenswrapper[4751]: I0227 16:41:56.317959 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm" event={"ID":"b9d9ee08-5a13-4f98-916d-b9b330f8963c","Type":"ContainerDied","Data":"870ae1cf18dec446af7a2178149b416a54aaa9d970a0c6381fd30fd89ad72ae2"} Feb 27 16:41:58 crc kubenswrapper[4751]: I0227 16:41:58.919478 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 16:41:58 crc kubenswrapper[4751]: I0227 16:41:58.920202 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" 
podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:42:00 crc kubenswrapper[4751]: I0227 16:42:00.141469 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536842-nq5mv"] Feb 27 16:42:00 crc kubenswrapper[4751]: I0227 16:42:00.142147 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536842-nq5mv" Feb 27 16:42:00 crc kubenswrapper[4751]: I0227 16:42:00.146764 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 16:42:00 crc kubenswrapper[4751]: I0227 16:42:00.146886 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 16:42:00 crc kubenswrapper[4751]: I0227 16:42:00.147243 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 16:42:00 crc kubenswrapper[4751]: I0227 16:42:00.161273 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536842-nq5mv"] Feb 27 16:42:00 crc kubenswrapper[4751]: I0227 16:42:00.282779 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztg6c\" (UniqueName: \"kubernetes.io/projected/d07cffe8-018d-43c5-80bc-c729dca39251-kube-api-access-ztg6c\") pod \"auto-csr-approver-29536842-nq5mv\" (UID: \"d07cffe8-018d-43c5-80bc-c729dca39251\") " pod="openshift-infra/auto-csr-approver-29536842-nq5mv" Feb 27 16:42:00 crc kubenswrapper[4751]: I0227 16:42:00.384297 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztg6c\" (UniqueName: \"kubernetes.io/projected/d07cffe8-018d-43c5-80bc-c729dca39251-kube-api-access-ztg6c\") pod \"auto-csr-approver-29536842-nq5mv\" (UID: \"d07cffe8-018d-43c5-80bc-c729dca39251\") " pod="openshift-infra/auto-csr-approver-29536842-nq5mv" Feb 27 16:42:00 crc kubenswrapper[4751]: I0227 16:42:00.416871 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztg6c\" (UniqueName: \"kubernetes.io/projected/d07cffe8-018d-43c5-80bc-c729dca39251-kube-api-access-ztg6c\") pod \"auto-csr-approver-29536842-nq5mv\" (UID: \"d07cffe8-018d-43c5-80bc-c729dca39251\") " pod="openshift-infra/auto-csr-approver-29536842-nq5mv" Feb 27 16:42:00 crc kubenswrapper[4751]: I0227 16:42:00.467358 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536842-nq5mv" Feb 27 16:42:00 crc kubenswrapper[4751]: I0227 16:42:00.975816 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536842-nq5mv"] Feb 27 16:42:01 crc kubenswrapper[4751]: W0227 16:42:01.000509 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd07cffe8_018d_43c5_80bc_c729dca39251.slice/crio-66203b7ff97b43e292d2d4985238c6089fd77e0aa6710d33c7dd32700deee73c WatchSource:0}: Error finding container 66203b7ff97b43e292d2d4985238c6089fd77e0aa6710d33c7dd32700deee73c: Status 404 returned error can't find the container with id 66203b7ff97b43e292d2d4985238c6089fd77e0aa6710d33c7dd32700deee73c Feb 27 16:42:01 crc kubenswrapper[4751]: I0227 16:42:01.354678 4751 generic.go:334] "Generic (PLEG): container finished" podID="b9d9ee08-5a13-4f98-916d-b9b330f8963c" containerID="ff87893c5e77025ec3845a7b24f72e6a4d8ef8207718cbe8195185853710869d" exitCode=0 Feb 27 16:42:01 crc kubenswrapper[4751]: I0227 16:42:01.354806 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm" event={"ID":"b9d9ee08-5a13-4f98-916d-b9b330f8963c","Type":"ContainerDied","Data":"ff87893c5e77025ec3845a7b24f72e6a4d8ef8207718cbe8195185853710869d"} Feb 27 16:42:01 crc kubenswrapper[4751]: I0227 16:42:01.355780 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536842-nq5mv" event={"ID":"d07cffe8-018d-43c5-80bc-c729dca39251","Type":"ContainerStarted","Data":"66203b7ff97b43e292d2d4985238c6089fd77e0aa6710d33c7dd32700deee73c"} Feb 27 16:42:02 crc kubenswrapper[4751]: I0227 16:42:02.364614 4751 generic.go:334] "Generic (PLEG): container finished" podID="b9d9ee08-5a13-4f98-916d-b9b330f8963c" containerID="cc30108db12d1eb00865c14657e22775d88b9a50badfb023dbae0a79941eca1e" exitCode=0 Feb 27 16:42:02 crc kubenswrapper[4751]: I0227 16:42:02.364663 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm" event={"ID":"b9d9ee08-5a13-4f98-916d-b9b330f8963c","Type":"ContainerDied","Data":"cc30108db12d1eb00865c14657e22775d88b9a50badfb023dbae0a79941eca1e"} Feb 27 16:42:02 crc kubenswrapper[4751]: I0227 16:42:02.366699 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536842-nq5mv" event={"ID":"d07cffe8-018d-43c5-80bc-c729dca39251","Type":"ContainerStarted","Data":"2d396d144fb1623ac36f7774b720c3e6a1114543bd6fe318832210eda9c71045"} Feb 27 16:42:02 crc kubenswrapper[4751]: I0227 16:42:02.400140 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-infra/auto-csr-approver-29536842-nq5mv" podStartSLOduration=1.5267142009999999 podStartE2EDuration="2.400119876s" podCreationTimestamp="2026-02-27 16:42:00 +0000 UTC" firstStartedPulling="2026-02-27 16:42:01.003283504 +0000 UTC m=+1083.150297951" lastFinishedPulling="2026-02-27 16:42:01.876689179 +0000 UTC m=+1084.023703626" observedRunningTime="2026-02-27 16:42:02.39950511 +0000 UTC m=+1084.546519557" watchObservedRunningTime="2026-02-27 16:42:02.400119876 +0000 UTC m=+1084.547134333" Feb 27 16:42:03 crc kubenswrapper[4751]: I0227 16:42:03.377815 4751 generic.go:334] "Generic (PLEG): container finished" podID="d07cffe8-018d-43c5-80bc-c729dca39251" 
containerID="2d396d144fb1623ac36f7774b720c3e6a1114543bd6fe318832210eda9c71045" exitCode=0 Feb 27 16:42:03 crc kubenswrapper[4751]: I0227 16:42:03.377982 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536842-nq5mv" event={"ID":"d07cffe8-018d-43c5-80bc-c729dca39251","Type":"ContainerDied","Data":"2d396d144fb1623ac36f7774b720c3e6a1114543bd6fe318832210eda9c71045"} Feb 27 16:42:03 crc kubenswrapper[4751]: I0227 16:42:03.684871 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm" Feb 27 16:42:03 crc kubenswrapper[4751]: I0227 16:42:03.835471 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b9d9ee08-5a13-4f98-916d-b9b330f8963c-bundle\") pod \"b9d9ee08-5a13-4f98-916d-b9b330f8963c\" (UID: \"b9d9ee08-5a13-4f98-916d-b9b330f8963c\") " Feb 27 16:42:03 crc kubenswrapper[4751]: I0227 16:42:03.835590 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b9d9ee08-5a13-4f98-916d-b9b330f8963c-util\") pod \"b9d9ee08-5a13-4f98-916d-b9b330f8963c\" (UID: \"b9d9ee08-5a13-4f98-916d-b9b330f8963c\") " Feb 27 16:42:03 crc kubenswrapper[4751]: I0227 16:42:03.835634 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2jt9p\" (UniqueName: \"kubernetes.io/projected/b9d9ee08-5a13-4f98-916d-b9b330f8963c-kube-api-access-2jt9p\") pod \"b9d9ee08-5a13-4f98-916d-b9b330f8963c\" (UID: \"b9d9ee08-5a13-4f98-916d-b9b330f8963c\") " Feb 27 16:42:03 crc kubenswrapper[4751]: I0227 16:42:03.837647 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9d9ee08-5a13-4f98-916d-b9b330f8963c-bundle" (OuterVolumeSpecName: "bundle") pod "b9d9ee08-5a13-4f98-916d-b9b330f8963c" (UID: "b9d9ee08-5a13-4f98-916d-b9b330f8963c"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:42:03 crc kubenswrapper[4751]: I0227 16:42:03.843825 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9d9ee08-5a13-4f98-916d-b9b330f8963c-kube-api-access-2jt9p" (OuterVolumeSpecName: "kube-api-access-2jt9p") pod "b9d9ee08-5a13-4f98-916d-b9b330f8963c" (UID: "b9d9ee08-5a13-4f98-916d-b9b330f8963c"). InnerVolumeSpecName "kube-api-access-2jt9p". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:42:03 crc kubenswrapper[4751]: I0227 16:42:03.852383 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9d9ee08-5a13-4f98-916d-b9b330f8963c-util" (OuterVolumeSpecName: "util") pod "b9d9ee08-5a13-4f98-916d-b9b330f8963c" (UID: "b9d9ee08-5a13-4f98-916d-b9b330f8963c"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:42:03 crc kubenswrapper[4751]: I0227 16:42:03.937607 4751 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b9d9ee08-5a13-4f98-916d-b9b330f8963c-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:42:03 crc kubenswrapper[4751]: I0227 16:42:03.937646 4751 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b9d9ee08-5a13-4f98-916d-b9b330f8963c-util\") on node \"crc\" DevicePath \"\"" Feb 27 16:42:03 crc kubenswrapper[4751]: I0227 16:42:03.937661 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2jt9p\" (UniqueName: \"kubernetes.io/projected/b9d9ee08-5a13-4f98-916d-b9b330f8963c-kube-api-access-2jt9p\") on node \"crc\" DevicePath \"\"" Feb 27 16:42:04 crc kubenswrapper[4751]: I0227 16:42:04.390585 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm" Feb 27 16:42:04 crc kubenswrapper[4751]: I0227 16:42:04.390652 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm" event={"ID":"b9d9ee08-5a13-4f98-916d-b9b330f8963c","Type":"ContainerDied","Data":"dc87ff071023b56fed5c6c4015b0ab6c5bbe5c9bf7602346d2a06ab5070c5389"} Feb 27 16:42:04 crc kubenswrapper[4751]: I0227 16:42:04.390691 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dc87ff071023b56fed5c6c4015b0ab6c5bbe5c9bf7602346d2a06ab5070c5389" Feb 27 16:42:04 crc kubenswrapper[4751]: I0227 16:42:04.692109 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536842-nq5mv" Feb 27 16:42:04 crc kubenswrapper[4751]: I0227 16:42:04.849540 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ztg6c\" (UniqueName: \"kubernetes.io/projected/d07cffe8-018d-43c5-80bc-c729dca39251-kube-api-access-ztg6c\") pod \"d07cffe8-018d-43c5-80bc-c729dca39251\" (UID: \"d07cffe8-018d-43c5-80bc-c729dca39251\") " Feb 27 16:42:04 crc kubenswrapper[4751]: I0227 16:42:04.857053 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d07cffe8-018d-43c5-80bc-c729dca39251-kube-api-access-ztg6c" (OuterVolumeSpecName: "kube-api-access-ztg6c") pod "d07cffe8-018d-43c5-80bc-c729dca39251" (UID: "d07cffe8-018d-43c5-80bc-c729dca39251"). InnerVolumeSpecName "kube-api-access-ztg6c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:42:04 crc kubenswrapper[4751]: I0227 16:42:04.951729 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ztg6c\" (UniqueName: \"kubernetes.io/projected/d07cffe8-018d-43c5-80bc-c729dca39251-kube-api-access-ztg6c\") on node \"crc\" DevicePath \"\"" Feb 27 16:42:05 crc kubenswrapper[4751]: I0227 16:42:05.401059 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536842-nq5mv" event={"ID":"d07cffe8-018d-43c5-80bc-c729dca39251","Type":"ContainerDied","Data":"66203b7ff97b43e292d2d4985238c6089fd77e0aa6710d33c7dd32700deee73c"} Feb 27 16:42:05 crc kubenswrapper[4751]: I0227 16:42:05.401138 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="66203b7ff97b43e292d2d4985238c6089fd77e0aa6710d33c7dd32700deee73c" Feb 27 16:42:05 crc kubenswrapper[4751]: I0227 16:42:05.401145 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536842-nq5mv" Feb 27 16:42:05 crc kubenswrapper[4751]: I0227 16:42:05.762824 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536836-txd2x"] Feb 27 16:42:05 crc kubenswrapper[4751]: I0227 16:42:05.767034 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536836-txd2x"] Feb 27 16:42:06 crc kubenswrapper[4751]: I0227 16:42:06.533522 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="837698d3-c1f7-4873-b909-5fbab8c45f05" path="/var/lib/kubelet/pods/837698d3-c1f7-4873-b909-5fbab8c45f05/volumes" Feb 27 16:42:12 crc kubenswrapper[4751]: I0227 16:42:12.834805 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-kk7zc"] Feb 27 16:42:12 crc kubenswrapper[4751]: E0227 16:42:12.835320 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d07cffe8-018d-43c5-80bc-c729dca39251" containerName="oc" Feb 27 16:42:12 crc kubenswrapper[4751]: I0227 16:42:12.835333 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="d07cffe8-018d-43c5-80bc-c729dca39251" containerName="oc" Feb 27 16:42:12 crc kubenswrapper[4751]: E0227 16:42:12.835344 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9d9ee08-5a13-4f98-916d-b9b330f8963c" containerName="pull" Feb 27 16:42:12 crc kubenswrapper[4751]: I0227 16:42:12.835350 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9d9ee08-5a13-4f98-916d-b9b330f8963c" containerName="pull" Feb 27 16:42:12 crc kubenswrapper[4751]: E0227 16:42:12.835361 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9d9ee08-5a13-4f98-916d-b9b330f8963c" containerName="extract" Feb 27 16:42:12 crc kubenswrapper[4751]: I0227 16:42:12.835367 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9d9ee08-5a13-4f98-916d-b9b330f8963c" containerName="extract" Feb 27 16:42:12 crc kubenswrapper[4751]: E0227 16:42:12.835375 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9d9ee08-5a13-4f98-916d-b9b330f8963c" containerName="util" Feb 27 16:42:12 crc kubenswrapper[4751]: I0227 16:42:12.835381 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9d9ee08-5a13-4f98-916d-b9b330f8963c" containerName="util" Feb 27 16:42:12 crc kubenswrapper[4751]: I0227 16:42:12.835495 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="d07cffe8-018d-43c5-80bc-c729dca39251" 
containerName="oc" Feb 27 16:42:12 crc kubenswrapper[4751]: I0227 16:42:12.835508 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9d9ee08-5a13-4f98-916d-b9b330f8963c" containerName="extract" Feb 27 16:42:12 crc kubenswrapper[4751]: I0227 16:42:12.835900 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-kk7zc" Feb 27 16:42:12 crc kubenswrapper[4751]: I0227 16:42:12.843953 4751 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-2jfth" Feb 27 16:42:12 crc kubenswrapper[4751]: I0227 16:42:12.844075 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt" Feb 27 16:42:12 crc kubenswrapper[4751]: I0227 16:42:12.844197 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt" Feb 27 16:42:12 crc kubenswrapper[4751]: I0227 16:42:12.863028 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-kk7zc"] Feb 27 16:42:12 crc kubenswrapper[4751]: I0227 16:42:12.964143 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kv84q\" (UniqueName: \"kubernetes.io/projected/63443514-9c6f-43be-81b9-af3530b4827e-kube-api-access-kv84q\") pod \"cert-manager-operator-controller-manager-66c8bdd694-kk7zc\" (UID: \"63443514-9c6f-43be-81b9-af3530b4827e\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-kk7zc" Feb 27 16:42:12 crc kubenswrapper[4751]: I0227 16:42:12.964580 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/63443514-9c6f-43be-81b9-af3530b4827e-tmp\") pod \"cert-manager-operator-controller-manager-66c8bdd694-kk7zc\" (UID: \"63443514-9c6f-43be-81b9-af3530b4827e\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-kk7zc" Feb 27 16:42:13 crc kubenswrapper[4751]: I0227 16:42:13.065953 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kv84q\" (UniqueName: \"kubernetes.io/projected/63443514-9c6f-43be-81b9-af3530b4827e-kube-api-access-kv84q\") pod \"cert-manager-operator-controller-manager-66c8bdd694-kk7zc\" (UID: \"63443514-9c6f-43be-81b9-af3530b4827e\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-kk7zc" Feb 27 16:42:13 crc kubenswrapper[4751]: I0227 16:42:13.066025 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/63443514-9c6f-43be-81b9-af3530b4827e-tmp\") pod \"cert-manager-operator-controller-manager-66c8bdd694-kk7zc\" (UID: \"63443514-9c6f-43be-81b9-af3530b4827e\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-kk7zc" Feb 27 16:42:13 crc kubenswrapper[4751]: I0227 16:42:13.066500 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/63443514-9c6f-43be-81b9-af3530b4827e-tmp\") pod \"cert-manager-operator-controller-manager-66c8bdd694-kk7zc\" (UID: \"63443514-9c6f-43be-81b9-af3530b4827e\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-kk7zc" Feb 27 16:42:13 crc kubenswrapper[4751]: I0227 
16:42:13.088329 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kv84q\" (UniqueName: \"kubernetes.io/projected/63443514-9c6f-43be-81b9-af3530b4827e-kube-api-access-kv84q\") pod \"cert-manager-operator-controller-manager-66c8bdd694-kk7zc\" (UID: \"63443514-9c6f-43be-81b9-af3530b4827e\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-kk7zc" Feb 27 16:42:13 crc kubenswrapper[4751]: I0227 16:42:13.165811 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-kk7zc" Feb 27 16:42:13 crc kubenswrapper[4751]: I0227 16:42:13.464674 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-kk7zc"] Feb 27 16:42:13 crc kubenswrapper[4751]: W0227 16:42:13.478467 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod63443514_9c6f_43be_81b9_af3530b4827e.slice/crio-44606ecd5ce08cb3e7c8c195cb725558d7052413a1fac39378f1262fe43664e0 WatchSource:0}: Error finding container 44606ecd5ce08cb3e7c8c195cb725558d7052413a1fac39378f1262fe43664e0: Status 404 returned error can't find the container with id 44606ecd5ce08cb3e7c8c195cb725558d7052413a1fac39378f1262fe43664e0 Feb 27 16:42:14 crc kubenswrapper[4751]: I0227 16:42:14.474198 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-kk7zc" event={"ID":"63443514-9c6f-43be-81b9-af3530b4827e","Type":"ContainerStarted","Data":"44606ecd5ce08cb3e7c8c195cb725558d7052413a1fac39378f1262fe43664e0"} Feb 27 16:42:17 crc kubenswrapper[4751]: I0227 16:42:17.493675 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-kk7zc" event={"ID":"63443514-9c6f-43be-81b9-af3530b4827e","Type":"ContainerStarted","Data":"79dbe09ff12a3de8118381a92cc7e98bc819b4c32d2896102b201687fc6d1f94"} Feb 27 16:42:17 crc kubenswrapper[4751]: I0227 16:42:17.519840 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-kk7zc" podStartSLOduration=2.23433436 podStartE2EDuration="5.519827052s" podCreationTimestamp="2026-02-27 16:42:12 +0000 UTC" firstStartedPulling="2026-02-27 16:42:13.483144291 +0000 UTC m=+1095.630158738" lastFinishedPulling="2026-02-27 16:42:16.768636973 +0000 UTC m=+1098.915651430" observedRunningTime="2026-02-27 16:42:17.516906124 +0000 UTC m=+1099.663920571" watchObservedRunningTime="2026-02-27 16:42:17.519827052 +0000 UTC m=+1099.666841499" Feb 27 16:42:19 crc kubenswrapper[4751]: I0227 16:42:19.951061 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-6888856db4-svfnj"] Feb 27 16:42:19 crc kubenswrapper[4751]: I0227 16:42:19.951975 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-6888856db4-svfnj" Feb 27 16:42:19 crc kubenswrapper[4751]: I0227 16:42:19.954254 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Feb 27 16:42:19 crc kubenswrapper[4751]: I0227 16:42:19.954987 4751 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-lgtd8" Feb 27 16:42:19 crc kubenswrapper[4751]: I0227 16:42:19.956338 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Feb 27 16:42:19 crc kubenswrapper[4751]: I0227 16:42:19.971046 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-6888856db4-svfnj"] Feb 27 16:42:20 crc kubenswrapper[4751]: I0227 16:42:20.053380 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtkm5\" (UniqueName: \"kubernetes.io/projected/fcae9fa3-883c-4cba-b6e4-752ba0d7ae2c-kube-api-access-qtkm5\") pod \"cert-manager-webhook-6888856db4-svfnj\" (UID: \"fcae9fa3-883c-4cba-b6e4-752ba0d7ae2c\") " pod="cert-manager/cert-manager-webhook-6888856db4-svfnj" Feb 27 16:42:20 crc kubenswrapper[4751]: I0227 16:42:20.053484 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fcae9fa3-883c-4cba-b6e4-752ba0d7ae2c-bound-sa-token\") pod \"cert-manager-webhook-6888856db4-svfnj\" (UID: \"fcae9fa3-883c-4cba-b6e4-752ba0d7ae2c\") " pod="cert-manager/cert-manager-webhook-6888856db4-svfnj" Feb 27 16:42:20 crc kubenswrapper[4751]: I0227 16:42:20.154603 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtkm5\" (UniqueName: \"kubernetes.io/projected/fcae9fa3-883c-4cba-b6e4-752ba0d7ae2c-kube-api-access-qtkm5\") pod \"cert-manager-webhook-6888856db4-svfnj\" (UID: \"fcae9fa3-883c-4cba-b6e4-752ba0d7ae2c\") " pod="cert-manager/cert-manager-webhook-6888856db4-svfnj" Feb 27 16:42:20 crc kubenswrapper[4751]: I0227 16:42:20.154722 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fcae9fa3-883c-4cba-b6e4-752ba0d7ae2c-bound-sa-token\") pod \"cert-manager-webhook-6888856db4-svfnj\" (UID: \"fcae9fa3-883c-4cba-b6e4-752ba0d7ae2c\") " pod="cert-manager/cert-manager-webhook-6888856db4-svfnj" Feb 27 16:42:20 crc kubenswrapper[4751]: I0227 16:42:20.172022 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fcae9fa3-883c-4cba-b6e4-752ba0d7ae2c-bound-sa-token\") pod \"cert-manager-webhook-6888856db4-svfnj\" (UID: \"fcae9fa3-883c-4cba-b6e4-752ba0d7ae2c\") " pod="cert-manager/cert-manager-webhook-6888856db4-svfnj" Feb 27 16:42:20 crc kubenswrapper[4751]: I0227 16:42:20.172925 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtkm5\" (UniqueName: \"kubernetes.io/projected/fcae9fa3-883c-4cba-b6e4-752ba0d7ae2c-kube-api-access-qtkm5\") pod \"cert-manager-webhook-6888856db4-svfnj\" (UID: \"fcae9fa3-883c-4cba-b6e4-752ba0d7ae2c\") " pod="cert-manager/cert-manager-webhook-6888856db4-svfnj" Feb 27 16:42:20 crc kubenswrapper[4751]: I0227 16:42:20.272890 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-6888856db4-svfnj" Feb 27 16:42:20 crc kubenswrapper[4751]: I0227 16:42:20.676930 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-6888856db4-svfnj"] Feb 27 16:42:21 crc kubenswrapper[4751]: I0227 16:42:21.519538 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-6888856db4-svfnj" event={"ID":"fcae9fa3-883c-4cba-b6e4-752ba0d7ae2c","Type":"ContainerStarted","Data":"adf76da291fbcda0e51636f7bd7a40d4aabc2da0b4d1806d12e2e9a285b408e8"} Feb 27 16:42:23 crc kubenswrapper[4751]: I0227 16:42:23.712736 4751 scope.go:117] "RemoveContainer" containerID="0788dc7b2d935a2b967b3f651b7d5955608b2c950c534d0711fa151db94aae07" Feb 27 16:42:25 crc kubenswrapper[4751]: I0227 16:42:25.556245 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-6888856db4-svfnj" event={"ID":"fcae9fa3-883c-4cba-b6e4-752ba0d7ae2c","Type":"ContainerStarted","Data":"b42a24150850302596f9236b77a3b862252537609a08db5409b81b3b1bf65298"} Feb 27 16:42:25 crc kubenswrapper[4751]: I0227 16:42:25.556597 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-6888856db4-svfnj" Feb 27 16:42:25 crc kubenswrapper[4751]: I0227 16:42:25.586447 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-6888856db4-svfnj" podStartSLOduration=2.30513311 podStartE2EDuration="6.586394682s" podCreationTimestamp="2026-02-27 16:42:19 +0000 UTC" firstStartedPulling="2026-02-27 16:42:20.683120908 +0000 UTC m=+1102.830135365" lastFinishedPulling="2026-02-27 16:42:24.96438249 +0000 UTC m=+1107.111396937" observedRunningTime="2026-02-27 16:42:25.580717511 +0000 UTC m=+1107.727731968" watchObservedRunningTime="2026-02-27 16:42:25.586394682 +0000 UTC m=+1107.733409159" Feb 27 16:42:25 crc kubenswrapper[4751]: I0227 16:42:25.739318 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-5545bd876-j4nqw"] Feb 27 16:42:25 crc kubenswrapper[4751]: I0227 16:42:25.740582 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-5545bd876-j4nqw" Feb 27 16:42:25 crc kubenswrapper[4751]: I0227 16:42:25.747566 4751 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-p64lx" Feb 27 16:42:25 crc kubenswrapper[4751]: I0227 16:42:25.755552 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-5545bd876-j4nqw"] Feb 27 16:42:25 crc kubenswrapper[4751]: I0227 16:42:25.758238 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b89ea261-77e6-4d3d-92ac-d538d32ecea0-bound-sa-token\") pod \"cert-manager-cainjector-5545bd876-j4nqw\" (UID: \"b89ea261-77e6-4d3d-92ac-d538d32ecea0\") " pod="cert-manager/cert-manager-cainjector-5545bd876-j4nqw" Feb 27 16:42:25 crc kubenswrapper[4751]: I0227 16:42:25.758499 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntq2m\" (UniqueName: \"kubernetes.io/projected/b89ea261-77e6-4d3d-92ac-d538d32ecea0-kube-api-access-ntq2m\") pod \"cert-manager-cainjector-5545bd876-j4nqw\" (UID: \"b89ea261-77e6-4d3d-92ac-d538d32ecea0\") " pod="cert-manager/cert-manager-cainjector-5545bd876-j4nqw" Feb 27 16:42:25 crc kubenswrapper[4751]: I0227 16:42:25.860079 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b89ea261-77e6-4d3d-92ac-d538d32ecea0-bound-sa-token\") pod \"cert-manager-cainjector-5545bd876-j4nqw\" (UID: \"b89ea261-77e6-4d3d-92ac-d538d32ecea0\") " pod="cert-manager/cert-manager-cainjector-5545bd876-j4nqw" Feb 27 16:42:25 crc kubenswrapper[4751]: I0227 16:42:25.860168 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ntq2m\" (UniqueName: \"kubernetes.io/projected/b89ea261-77e6-4d3d-92ac-d538d32ecea0-kube-api-access-ntq2m\") pod \"cert-manager-cainjector-5545bd876-j4nqw\" (UID: \"b89ea261-77e6-4d3d-92ac-d538d32ecea0\") " pod="cert-manager/cert-manager-cainjector-5545bd876-j4nqw" Feb 27 16:42:25 crc kubenswrapper[4751]: I0227 16:42:25.885834 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntq2m\" (UniqueName: \"kubernetes.io/projected/b89ea261-77e6-4d3d-92ac-d538d32ecea0-kube-api-access-ntq2m\") pod \"cert-manager-cainjector-5545bd876-j4nqw\" (UID: \"b89ea261-77e6-4d3d-92ac-d538d32ecea0\") " pod="cert-manager/cert-manager-cainjector-5545bd876-j4nqw" Feb 27 16:42:25 crc kubenswrapper[4751]: I0227 16:42:25.887237 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b89ea261-77e6-4d3d-92ac-d538d32ecea0-bound-sa-token\") pod \"cert-manager-cainjector-5545bd876-j4nqw\" (UID: \"b89ea261-77e6-4d3d-92ac-d538d32ecea0\") " pod="cert-manager/cert-manager-cainjector-5545bd876-j4nqw" Feb 27 16:42:26 crc kubenswrapper[4751]: I0227 16:42:26.069019 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-5545bd876-j4nqw" Feb 27 16:42:26 crc kubenswrapper[4751]: I0227 16:42:26.360730 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-5545bd876-j4nqw"] Feb 27 16:42:26 crc kubenswrapper[4751]: W0227 16:42:26.367161 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb89ea261_77e6_4d3d_92ac_d538d32ecea0.slice/crio-77cb6355ab91547d11998f80f0550e49e68bd9621a23f02359955e4bba853391 WatchSource:0}: Error finding container 77cb6355ab91547d11998f80f0550e49e68bd9621a23f02359955e4bba853391: Status 404 returned error can't find the container with id 77cb6355ab91547d11998f80f0550e49e68bd9621a23f02359955e4bba853391 Feb 27 16:42:26 crc kubenswrapper[4751]: I0227 16:42:26.565714 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-5545bd876-j4nqw" event={"ID":"b89ea261-77e6-4d3d-92ac-d538d32ecea0","Type":"ContainerStarted","Data":"feb6f2248f2b1c0d8a3ae85717d2c5d495abec1dcc21f183918db7c6f77dd903"} Feb 27 16:42:26 crc kubenswrapper[4751]: I0227 16:42:26.565782 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-5545bd876-j4nqw" event={"ID":"b89ea261-77e6-4d3d-92ac-d538d32ecea0","Type":"ContainerStarted","Data":"77cb6355ab91547d11998f80f0550e49e68bd9621a23f02359955e4bba853391"} Feb 27 16:42:26 crc kubenswrapper[4751]: I0227 16:42:26.596025 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-5545bd876-j4nqw" podStartSLOduration=1.595991407 podStartE2EDuration="1.595991407s" podCreationTimestamp="2026-02-27 16:42:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:42:26.583051972 +0000 UTC m=+1108.730066459" watchObservedRunningTime="2026-02-27 16:42:26.595991407 +0000 UTC m=+1108.743005904" Feb 27 16:42:28 crc kubenswrapper[4751]: I0227 16:42:28.918305 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 16:42:28 crc kubenswrapper[4751]: I0227 16:42:28.918861 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:42:30 crc kubenswrapper[4751]: I0227 16:42:30.274868 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-6888856db4-svfnj" Feb 27 16:42:39 crc kubenswrapper[4751]: I0227 16:42:39.321054 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-545d4d4674-dk9z8"] Feb 27 16:42:39 crc kubenswrapper[4751]: I0227 16:42:39.323312 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-545d4d4674-dk9z8" Feb 27 16:42:39 crc kubenswrapper[4751]: I0227 16:42:39.326113 4751 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-pxwkn" Feb 27 16:42:39 crc kubenswrapper[4751]: I0227 16:42:39.331021 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-545d4d4674-dk9z8"] Feb 27 16:42:39 crc kubenswrapper[4751]: I0227 16:42:39.356568 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c4450555-cd8d-466f-80e0-6d957133213f-bound-sa-token\") pod \"cert-manager-545d4d4674-dk9z8\" (UID: \"c4450555-cd8d-466f-80e0-6d957133213f\") " pod="cert-manager/cert-manager-545d4d4674-dk9z8" Feb 27 16:42:39 crc kubenswrapper[4751]: I0227 16:42:39.356761 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hplpp\" (UniqueName: \"kubernetes.io/projected/c4450555-cd8d-466f-80e0-6d957133213f-kube-api-access-hplpp\") pod \"cert-manager-545d4d4674-dk9z8\" (UID: \"c4450555-cd8d-466f-80e0-6d957133213f\") " pod="cert-manager/cert-manager-545d4d4674-dk9z8" Feb 27 16:42:39 crc kubenswrapper[4751]: I0227 16:42:39.457332 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hplpp\" (UniqueName: \"kubernetes.io/projected/c4450555-cd8d-466f-80e0-6d957133213f-kube-api-access-hplpp\") pod \"cert-manager-545d4d4674-dk9z8\" (UID: \"c4450555-cd8d-466f-80e0-6d957133213f\") " pod="cert-manager/cert-manager-545d4d4674-dk9z8" Feb 27 16:42:39 crc kubenswrapper[4751]: I0227 16:42:39.457647 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c4450555-cd8d-466f-80e0-6d957133213f-bound-sa-token\") pod \"cert-manager-545d4d4674-dk9z8\" (UID: \"c4450555-cd8d-466f-80e0-6d957133213f\") " pod="cert-manager/cert-manager-545d4d4674-dk9z8" Feb 27 16:42:39 crc kubenswrapper[4751]: I0227 16:42:39.476504 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c4450555-cd8d-466f-80e0-6d957133213f-bound-sa-token\") pod \"cert-manager-545d4d4674-dk9z8\" (UID: \"c4450555-cd8d-466f-80e0-6d957133213f\") " pod="cert-manager/cert-manager-545d4d4674-dk9z8" Feb 27 16:42:39 crc kubenswrapper[4751]: I0227 16:42:39.482726 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hplpp\" (UniqueName: \"kubernetes.io/projected/c4450555-cd8d-466f-80e0-6d957133213f-kube-api-access-hplpp\") pod \"cert-manager-545d4d4674-dk9z8\" (UID: \"c4450555-cd8d-466f-80e0-6d957133213f\") " pod="cert-manager/cert-manager-545d4d4674-dk9z8" Feb 27 16:42:39 crc kubenswrapper[4751]: I0227 16:42:39.653970 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-545d4d4674-dk9z8" Feb 27 16:42:40 crc kubenswrapper[4751]: I0227 16:42:40.102091 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-545d4d4674-dk9z8"] Feb 27 16:42:40 crc kubenswrapper[4751]: W0227 16:42:40.105453 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc4450555_cd8d_466f_80e0_6d957133213f.slice/crio-71805368c936d4c1d4112d227dcabb7140034ef6d15d7c15cc0c160af29ef582 WatchSource:0}: Error finding container 71805368c936d4c1d4112d227dcabb7140034ef6d15d7c15cc0c160af29ef582: Status 404 returned error can't find the container with id 71805368c936d4c1d4112d227dcabb7140034ef6d15d7c15cc0c160af29ef582 Feb 27 16:42:40 crc kubenswrapper[4751]: I0227 16:42:40.652366 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-545d4d4674-dk9z8" event={"ID":"c4450555-cd8d-466f-80e0-6d957133213f","Type":"ContainerStarted","Data":"71805368c936d4c1d4112d227dcabb7140034ef6d15d7c15cc0c160af29ef582"} Feb 27 16:42:41 crc kubenswrapper[4751]: I0227 16:42:41.660825 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-545d4d4674-dk9z8" event={"ID":"c4450555-cd8d-466f-80e0-6d957133213f","Type":"ContainerStarted","Data":"a07bf47e1a4397d2972a23a48bc4be48600733f21fd4b467ea5ad6e65e1ae100"} Feb 27 16:42:41 crc kubenswrapper[4751]: I0227 16:42:41.687247 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-545d4d4674-dk9z8" podStartSLOduration=2.687229744 podStartE2EDuration="2.687229744s" podCreationTimestamp="2026-02-27 16:42:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:42:41.685231271 +0000 UTC m=+1123.832245768" watchObservedRunningTime="2026-02-27 16:42:41.687229744 +0000 UTC m=+1123.834244191" Feb 27 16:42:44 crc kubenswrapper[4751]: I0227 16:42:44.606244 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-5hkzz"] Feb 27 16:42:44 crc kubenswrapper[4751]: I0227 16:42:44.607735 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-5hkzz" Feb 27 16:42:44 crc kubenswrapper[4751]: I0227 16:42:44.614052 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-f2tgs" Feb 27 16:42:44 crc kubenswrapper[4751]: I0227 16:42:44.614256 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Feb 27 16:42:44 crc kubenswrapper[4751]: I0227 16:42:44.614666 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Feb 27 16:42:44 crc kubenswrapper[4751]: I0227 16:42:44.617226 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-5hkzz"] Feb 27 16:42:44 crc kubenswrapper[4751]: I0227 16:42:44.623045 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnpfb\" (UniqueName: \"kubernetes.io/projected/a02950db-f295-4882-934d-4cc4e27459bc-kube-api-access-rnpfb\") pod \"openstack-operator-index-5hkzz\" (UID: \"a02950db-f295-4882-934d-4cc4e27459bc\") " pod="openstack-operators/openstack-operator-index-5hkzz" Feb 27 16:42:44 crc kubenswrapper[4751]: I0227 16:42:44.723872 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnpfb\" (UniqueName: \"kubernetes.io/projected/a02950db-f295-4882-934d-4cc4e27459bc-kube-api-access-rnpfb\") pod \"openstack-operator-index-5hkzz\" (UID: \"a02950db-f295-4882-934d-4cc4e27459bc\") " pod="openstack-operators/openstack-operator-index-5hkzz" Feb 27 16:42:44 crc kubenswrapper[4751]: I0227 16:42:44.751751 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnpfb\" (UniqueName: \"kubernetes.io/projected/a02950db-f295-4882-934d-4cc4e27459bc-kube-api-access-rnpfb\") pod \"openstack-operator-index-5hkzz\" (UID: \"a02950db-f295-4882-934d-4cc4e27459bc\") " pod="openstack-operators/openstack-operator-index-5hkzz" Feb 27 16:42:44 crc kubenswrapper[4751]: I0227 16:42:44.933068 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-5hkzz" Feb 27 16:42:45 crc kubenswrapper[4751]: I0227 16:42:45.184078 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-5hkzz"] Feb 27 16:42:45 crc kubenswrapper[4751]: W0227 16:42:45.192289 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda02950db_f295_4882_934d_4cc4e27459bc.slice/crio-f35fdc02353eb89e8478259cc488400b2fe72962b1f64eca38f4b4a31c79ee8a WatchSource:0}: Error finding container f35fdc02353eb89e8478259cc488400b2fe72962b1f64eca38f4b4a31c79ee8a: Status 404 returned error can't find the container with id f35fdc02353eb89e8478259cc488400b2fe72962b1f64eca38f4b4a31c79ee8a Feb 27 16:42:45 crc kubenswrapper[4751]: I0227 16:42:45.687782 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-5hkzz" event={"ID":"a02950db-f295-4882-934d-4cc4e27459bc","Type":"ContainerStarted","Data":"f35fdc02353eb89e8478259cc488400b2fe72962b1f64eca38f4b4a31c79ee8a"} Feb 27 16:42:47 crc kubenswrapper[4751]: I0227 16:42:47.940975 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-5hkzz"] Feb 27 16:42:48 crc kubenswrapper[4751]: I0227 16:42:48.569609 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-m6wq4"] Feb 27 16:42:48 crc kubenswrapper[4751]: I0227 16:42:48.571059 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-m6wq4"] Feb 27 16:42:48 crc kubenswrapper[4751]: I0227 16:42:48.571152 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-m6wq4" Feb 27 16:42:48 crc kubenswrapper[4751]: I0227 16:42:48.582191 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzf55\" (UniqueName: \"kubernetes.io/projected/6525b243-d114-45bf-ab4f-c859cdadee78-kube-api-access-mzf55\") pod \"openstack-operator-index-m6wq4\" (UID: \"6525b243-d114-45bf-ab4f-c859cdadee78\") " pod="openstack-operators/openstack-operator-index-m6wq4" Feb 27 16:42:48 crc kubenswrapper[4751]: I0227 16:42:48.684113 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzf55\" (UniqueName: \"kubernetes.io/projected/6525b243-d114-45bf-ab4f-c859cdadee78-kube-api-access-mzf55\") pod \"openstack-operator-index-m6wq4\" (UID: \"6525b243-d114-45bf-ab4f-c859cdadee78\") " pod="openstack-operators/openstack-operator-index-m6wq4" Feb 27 16:42:48 crc kubenswrapper[4751]: I0227 16:42:48.715611 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzf55\" (UniqueName: \"kubernetes.io/projected/6525b243-d114-45bf-ab4f-c859cdadee78-kube-api-access-mzf55\") pod \"openstack-operator-index-m6wq4\" (UID: \"6525b243-d114-45bf-ab4f-c859cdadee78\") " pod="openstack-operators/openstack-operator-index-m6wq4" Feb 27 16:42:48 crc kubenswrapper[4751]: I0227 16:42:48.900385 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-m6wq4" Feb 27 16:42:51 crc kubenswrapper[4751]: W0227 16:42:51.112102 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6525b243_d114_45bf_ab4f_c859cdadee78.slice/crio-cb4aa28fed783666955087a269fbd05819b548b9def91bb8dcc342f512e9f80d WatchSource:0}: Error finding container cb4aa28fed783666955087a269fbd05819b548b9def91bb8dcc342f512e9f80d: Status 404 returned error can't find the container with id cb4aa28fed783666955087a269fbd05819b548b9def91bb8dcc342f512e9f80d Feb 27 16:42:51 crc kubenswrapper[4751]: I0227 16:42:51.113857 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-m6wq4"] Feb 27 16:42:51 crc kubenswrapper[4751]: I0227 16:42:51.742036 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-m6wq4" event={"ID":"6525b243-d114-45bf-ab4f-c859cdadee78","Type":"ContainerStarted","Data":"b0015eac60b3f3268315b602255535d7637909a5ce7a999d9a4431ab78036ea8"} Feb 27 16:42:51 crc kubenswrapper[4751]: I0227 16:42:51.742131 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-m6wq4" event={"ID":"6525b243-d114-45bf-ab4f-c859cdadee78","Type":"ContainerStarted","Data":"cb4aa28fed783666955087a269fbd05819b548b9def91bb8dcc342f512e9f80d"} Feb 27 16:42:51 crc kubenswrapper[4751]: I0227 16:42:51.744832 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-5hkzz" event={"ID":"a02950db-f295-4882-934d-4cc4e27459bc","Type":"ContainerStarted","Data":"5ef4e43f117599366e9816019fbd6f01e0c5b5a9065d201b02eab4cdbb3eaa47"} Feb 27 16:42:51 crc kubenswrapper[4751]: I0227 16:42:51.745058 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-5hkzz" podUID="a02950db-f295-4882-934d-4cc4e27459bc" containerName="registry-server" containerID="cri-o://5ef4e43f117599366e9816019fbd6f01e0c5b5a9065d201b02eab4cdbb3eaa47" gracePeriod=2 Feb 27 16:42:51 crc kubenswrapper[4751]: I0227 16:42:51.785573 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-m6wq4" podStartSLOduration=3.622225156 podStartE2EDuration="3.785533659s" podCreationTimestamp="2026-02-27 16:42:48 +0000 UTC" firstStartedPulling="2026-02-27 16:42:51.117217643 +0000 UTC m=+1133.264232100" lastFinishedPulling="2026-02-27 16:42:51.280526116 +0000 UTC m=+1133.427540603" observedRunningTime="2026-02-27 16:42:51.772033139 +0000 UTC m=+1133.919047616" watchObservedRunningTime="2026-02-27 16:42:51.785533659 +0000 UTC m=+1133.932548136" Feb 27 16:42:51 crc kubenswrapper[4751]: I0227 16:42:51.799047 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-5hkzz" podStartSLOduration=2.182973364 podStartE2EDuration="7.799019709s" podCreationTimestamp="2026-02-27 16:42:44 +0000 UTC" firstStartedPulling="2026-02-27 16:42:45.194775259 +0000 UTC m=+1127.341789706" lastFinishedPulling="2026-02-27 16:42:50.810821604 +0000 UTC m=+1132.957836051" observedRunningTime="2026-02-27 16:42:51.794342604 +0000 UTC m=+1133.941357081" watchObservedRunningTime="2026-02-27 16:42:51.799019709 +0000 UTC m=+1133.946034186" Feb 27 16:42:52 crc kubenswrapper[4751]: I0227 16:42:52.199889 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-5hkzz" Feb 27 16:42:52 crc kubenswrapper[4751]: I0227 16:42:52.332100 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnpfb\" (UniqueName: \"kubernetes.io/projected/a02950db-f295-4882-934d-4cc4e27459bc-kube-api-access-rnpfb\") pod \"a02950db-f295-4882-934d-4cc4e27459bc\" (UID: \"a02950db-f295-4882-934d-4cc4e27459bc\") " Feb 27 16:42:52 crc kubenswrapper[4751]: I0227 16:42:52.339018 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a02950db-f295-4882-934d-4cc4e27459bc-kube-api-access-rnpfb" (OuterVolumeSpecName: "kube-api-access-rnpfb") pod "a02950db-f295-4882-934d-4cc4e27459bc" (UID: "a02950db-f295-4882-934d-4cc4e27459bc"). InnerVolumeSpecName "kube-api-access-rnpfb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:42:52 crc kubenswrapper[4751]: I0227 16:42:52.434355 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnpfb\" (UniqueName: \"kubernetes.io/projected/a02950db-f295-4882-934d-4cc4e27459bc-kube-api-access-rnpfb\") on node \"crc\" DevicePath \"\"" Feb 27 16:42:52 crc kubenswrapper[4751]: I0227 16:42:52.755254 4751 generic.go:334] "Generic (PLEG): container finished" podID="a02950db-f295-4882-934d-4cc4e27459bc" containerID="5ef4e43f117599366e9816019fbd6f01e0c5b5a9065d201b02eab4cdbb3eaa47" exitCode=0 Feb 27 16:42:52 crc kubenswrapper[4751]: I0227 16:42:52.755306 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-5hkzz" Feb 27 16:42:52 crc kubenswrapper[4751]: I0227 16:42:52.755312 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-5hkzz" event={"ID":"a02950db-f295-4882-934d-4cc4e27459bc","Type":"ContainerDied","Data":"5ef4e43f117599366e9816019fbd6f01e0c5b5a9065d201b02eab4cdbb3eaa47"} Feb 27 16:42:52 crc kubenswrapper[4751]: I0227 16:42:52.755356 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-5hkzz" event={"ID":"a02950db-f295-4882-934d-4cc4e27459bc","Type":"ContainerDied","Data":"f35fdc02353eb89e8478259cc488400b2fe72962b1f64eca38f4b4a31c79ee8a"} Feb 27 16:42:52 crc kubenswrapper[4751]: I0227 16:42:52.755453 4751 scope.go:117] "RemoveContainer" containerID="5ef4e43f117599366e9816019fbd6f01e0c5b5a9065d201b02eab4cdbb3eaa47" Feb 27 16:42:52 crc kubenswrapper[4751]: I0227 16:42:52.778133 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-5hkzz"] Feb 27 16:42:52 crc kubenswrapper[4751]: I0227 16:42:52.778596 4751 scope.go:117] "RemoveContainer" containerID="5ef4e43f117599366e9816019fbd6f01e0c5b5a9065d201b02eab4cdbb3eaa47" Feb 27 16:42:52 crc kubenswrapper[4751]: E0227 16:42:52.779020 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ef4e43f117599366e9816019fbd6f01e0c5b5a9065d201b02eab4cdbb3eaa47\": container with ID starting with 5ef4e43f117599366e9816019fbd6f01e0c5b5a9065d201b02eab4cdbb3eaa47 not found: ID does not exist" containerID="5ef4e43f117599366e9816019fbd6f01e0c5b5a9065d201b02eab4cdbb3eaa47" Feb 27 16:42:52 crc kubenswrapper[4751]: I0227 16:42:52.779064 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ef4e43f117599366e9816019fbd6f01e0c5b5a9065d201b02eab4cdbb3eaa47"} err="failed to get 
container status \"5ef4e43f117599366e9816019fbd6f01e0c5b5a9065d201b02eab4cdbb3eaa47\": rpc error: code = NotFound desc = could not find container \"5ef4e43f117599366e9816019fbd6f01e0c5b5a9065d201b02eab4cdbb3eaa47\": container with ID starting with 5ef4e43f117599366e9816019fbd6f01e0c5b5a9065d201b02eab4cdbb3eaa47 not found: ID does not exist" Feb 27 16:42:52 crc kubenswrapper[4751]: I0227 16:42:52.783483 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-5hkzz"] Feb 27 16:42:54 crc kubenswrapper[4751]: I0227 16:42:54.536135 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a02950db-f295-4882-934d-4cc4e27459bc" path="/var/lib/kubelet/pods/a02950db-f295-4882-934d-4cc4e27459bc/volumes" Feb 27 16:42:58 crc kubenswrapper[4751]: I0227 16:42:58.901037 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-m6wq4" Feb 27 16:42:58 crc kubenswrapper[4751]: I0227 16:42:58.901877 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-m6wq4" Feb 27 16:42:58 crc kubenswrapper[4751]: I0227 16:42:58.918697 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 16:42:58 crc kubenswrapper[4751]: I0227 16:42:58.918743 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:42:58 crc kubenswrapper[4751]: I0227 16:42:58.918775 4751 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 16:42:58 crc kubenswrapper[4751]: I0227 16:42:58.919204 4751 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9018816dbd90d84dbf45956d038f614eb1f6863111903b50bc2958c2e12ef97b"} pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 27 16:42:58 crc kubenswrapper[4751]: I0227 16:42:58.919273 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" containerID="cri-o://9018816dbd90d84dbf45956d038f614eb1f6863111903b50bc2958c2e12ef97b" gracePeriod=600 Feb 27 16:42:58 crc kubenswrapper[4751]: I0227 16:42:58.941425 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-m6wq4" Feb 27 16:42:59 crc kubenswrapper[4751]: I0227 16:42:59.816603 4751 generic.go:334] "Generic (PLEG): container finished" podID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerID="9018816dbd90d84dbf45956d038f614eb1f6863111903b50bc2958c2e12ef97b" exitCode=0 Feb 27 16:42:59 crc kubenswrapper[4751]: I0227 16:42:59.816700 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerDied","Data":"9018816dbd90d84dbf45956d038f614eb1f6863111903b50bc2958c2e12ef97b"} Feb 27 16:42:59 crc kubenswrapper[4751]: I0227 16:42:59.816865 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerStarted","Data":"ef7e9a78c9c006f209ebb578b8c3e17b655897835e4a3ab4f6e482b486441566"} Feb 27 16:42:59 crc kubenswrapper[4751]: I0227 16:42:59.816891 4751 scope.go:117] "RemoveContainer" containerID="6eb163b225e8b4061c0a49276f7f1481358603b35f8794f8c9ade9058836265d" Feb 27 16:42:59 crc kubenswrapper[4751]: I0227 16:42:59.851819 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-m6wq4" Feb 27 16:43:00 crc kubenswrapper[4751]: I0227 16:43:00.807858 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7"] Feb 27 16:43:00 crc kubenswrapper[4751]: E0227 16:43:00.808579 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a02950db-f295-4882-934d-4cc4e27459bc" containerName="registry-server" Feb 27 16:43:00 crc kubenswrapper[4751]: I0227 16:43:00.808596 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="a02950db-f295-4882-934d-4cc4e27459bc" containerName="registry-server" Feb 27 16:43:00 crc kubenswrapper[4751]: I0227 16:43:00.808774 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="a02950db-f295-4882-934d-4cc4e27459bc" containerName="registry-server" Feb 27 16:43:00 crc kubenswrapper[4751]: I0227 16:43:00.809860 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7" Feb 27 16:43:00 crc kubenswrapper[4751]: I0227 16:43:00.812399 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-wz656" Feb 27 16:43:00 crc kubenswrapper[4751]: I0227 16:43:00.814619 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7"] Feb 27 16:43:00 crc kubenswrapper[4751]: I0227 16:43:00.976480 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s22cm\" (UniqueName: \"kubernetes.io/projected/c7b68099-ffa0-4702-a816-f63c3ff1f53d-kube-api-access-s22cm\") pod \"58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7\" (UID: \"c7b68099-ffa0-4702-a816-f63c3ff1f53d\") " pod="openstack-operators/58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7" Feb 27 16:43:00 crc kubenswrapper[4751]: I0227 16:43:00.976619 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c7b68099-ffa0-4702-a816-f63c3ff1f53d-bundle\") pod \"58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7\" (UID: \"c7b68099-ffa0-4702-a816-f63c3ff1f53d\") " pod="openstack-operators/58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7" Feb 27 16:43:00 crc kubenswrapper[4751]: I0227 16:43:00.976709 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c7b68099-ffa0-4702-a816-f63c3ff1f53d-util\") pod \"58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7\" (UID: \"c7b68099-ffa0-4702-a816-f63c3ff1f53d\") " pod="openstack-operators/58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7" Feb 27 16:43:01 crc kubenswrapper[4751]: I0227 16:43:01.078456 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s22cm\" (UniqueName: \"kubernetes.io/projected/c7b68099-ffa0-4702-a816-f63c3ff1f53d-kube-api-access-s22cm\") pod \"58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7\" (UID: \"c7b68099-ffa0-4702-a816-f63c3ff1f53d\") " pod="openstack-operators/58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7" Feb 27 16:43:01 crc kubenswrapper[4751]: I0227 16:43:01.078558 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c7b68099-ffa0-4702-a816-f63c3ff1f53d-bundle\") pod \"58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7\" (UID: \"c7b68099-ffa0-4702-a816-f63c3ff1f53d\") " pod="openstack-operators/58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7" Feb 27 16:43:01 crc kubenswrapper[4751]: I0227 16:43:01.078585 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c7b68099-ffa0-4702-a816-f63c3ff1f53d-util\") pod \"58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7\" (UID: \"c7b68099-ffa0-4702-a816-f63c3ff1f53d\") " pod="openstack-operators/58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7" Feb 27 16:43:01 crc kubenswrapper[4751]: I0227 16:43:01.079167 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/c7b68099-ffa0-4702-a816-f63c3ff1f53d-util\") pod \"58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7\" (UID: \"c7b68099-ffa0-4702-a816-f63c3ff1f53d\") " pod="openstack-operators/58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7" Feb 27 16:43:01 crc kubenswrapper[4751]: I0227 16:43:01.079339 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c7b68099-ffa0-4702-a816-f63c3ff1f53d-bundle\") pod \"58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7\" (UID: \"c7b68099-ffa0-4702-a816-f63c3ff1f53d\") " pod="openstack-operators/58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7" Feb 27 16:43:01 crc kubenswrapper[4751]: I0227 16:43:01.107472 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s22cm\" (UniqueName: \"kubernetes.io/projected/c7b68099-ffa0-4702-a816-f63c3ff1f53d-kube-api-access-s22cm\") pod \"58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7\" (UID: \"c7b68099-ffa0-4702-a816-f63c3ff1f53d\") " pod="openstack-operators/58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7" Feb 27 16:43:01 crc kubenswrapper[4751]: I0227 16:43:01.153327 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7" Feb 27 16:43:01 crc kubenswrapper[4751]: I0227 16:43:01.401263 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7"] Feb 27 16:43:01 crc kubenswrapper[4751]: I0227 16:43:01.838237 4751 generic.go:334] "Generic (PLEG): container finished" podID="c7b68099-ffa0-4702-a816-f63c3ff1f53d" containerID="9185a87ee37df53b94c2d67bbea127522c0511e9edf18485e3e004c69502d158" exitCode=0 Feb 27 16:43:01 crc kubenswrapper[4751]: I0227 16:43:01.838352 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7" event={"ID":"c7b68099-ffa0-4702-a816-f63c3ff1f53d","Type":"ContainerDied","Data":"9185a87ee37df53b94c2d67bbea127522c0511e9edf18485e3e004c69502d158"} Feb 27 16:43:01 crc kubenswrapper[4751]: I0227 16:43:01.838393 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7" event={"ID":"c7b68099-ffa0-4702-a816-f63c3ff1f53d","Type":"ContainerStarted","Data":"fed76cd947b7fc953139cc8c08c69f95b34d13d7d152048730582405e737029a"} Feb 27 16:43:02 crc kubenswrapper[4751]: I0227 16:43:02.850507 4751 generic.go:334] "Generic (PLEG): container finished" podID="c7b68099-ffa0-4702-a816-f63c3ff1f53d" containerID="62556de05a1b9b18d883fd0dd68d00577cf22fabd7c5acaa64dc0e944dab5d99" exitCode=0 Feb 27 16:43:02 crc kubenswrapper[4751]: I0227 16:43:02.850631 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7" event={"ID":"c7b68099-ffa0-4702-a816-f63c3ff1f53d","Type":"ContainerDied","Data":"62556de05a1b9b18d883fd0dd68d00577cf22fabd7c5acaa64dc0e944dab5d99"} Feb 27 16:43:03 crc kubenswrapper[4751]: I0227 16:43:03.861549 4751 generic.go:334] "Generic (PLEG): container finished" podID="c7b68099-ffa0-4702-a816-f63c3ff1f53d" containerID="54ee4e4f90a3fdec28dff7c9a1cfbfd3470f2312ae5ddc110c99d8c04a6f00b6" exitCode=0 Feb 27 16:43:03 crc kubenswrapper[4751]: I0227 16:43:03.861627 4751 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7" event={"ID":"c7b68099-ffa0-4702-a816-f63c3ff1f53d","Type":"ContainerDied","Data":"54ee4e4f90a3fdec28dff7c9a1cfbfd3470f2312ae5ddc110c99d8c04a6f00b6"} Feb 27 16:43:05 crc kubenswrapper[4751]: I0227 16:43:05.190686 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7" Feb 27 16:43:05 crc kubenswrapper[4751]: I0227 16:43:05.342493 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s22cm\" (UniqueName: \"kubernetes.io/projected/c7b68099-ffa0-4702-a816-f63c3ff1f53d-kube-api-access-s22cm\") pod \"c7b68099-ffa0-4702-a816-f63c3ff1f53d\" (UID: \"c7b68099-ffa0-4702-a816-f63c3ff1f53d\") " Feb 27 16:43:05 crc kubenswrapper[4751]: I0227 16:43:05.342649 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c7b68099-ffa0-4702-a816-f63c3ff1f53d-util\") pod \"c7b68099-ffa0-4702-a816-f63c3ff1f53d\" (UID: \"c7b68099-ffa0-4702-a816-f63c3ff1f53d\") " Feb 27 16:43:05 crc kubenswrapper[4751]: I0227 16:43:05.342690 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c7b68099-ffa0-4702-a816-f63c3ff1f53d-bundle\") pod \"c7b68099-ffa0-4702-a816-f63c3ff1f53d\" (UID: \"c7b68099-ffa0-4702-a816-f63c3ff1f53d\") " Feb 27 16:43:05 crc kubenswrapper[4751]: I0227 16:43:05.344349 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7b68099-ffa0-4702-a816-f63c3ff1f53d-bundle" (OuterVolumeSpecName: "bundle") pod "c7b68099-ffa0-4702-a816-f63c3ff1f53d" (UID: "c7b68099-ffa0-4702-a816-f63c3ff1f53d"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:43:05 crc kubenswrapper[4751]: I0227 16:43:05.353007 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7b68099-ffa0-4702-a816-f63c3ff1f53d-kube-api-access-s22cm" (OuterVolumeSpecName: "kube-api-access-s22cm") pod "c7b68099-ffa0-4702-a816-f63c3ff1f53d" (UID: "c7b68099-ffa0-4702-a816-f63c3ff1f53d"). InnerVolumeSpecName "kube-api-access-s22cm". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:43:05 crc kubenswrapper[4751]: I0227 16:43:05.377188 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7b68099-ffa0-4702-a816-f63c3ff1f53d-util" (OuterVolumeSpecName: "util") pod "c7b68099-ffa0-4702-a816-f63c3ff1f53d" (UID: "c7b68099-ffa0-4702-a816-f63c3ff1f53d"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:43:05 crc kubenswrapper[4751]: I0227 16:43:05.444430 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s22cm\" (UniqueName: \"kubernetes.io/projected/c7b68099-ffa0-4702-a816-f63c3ff1f53d-kube-api-access-s22cm\") on node \"crc\" DevicePath \"\"" Feb 27 16:43:05 crc kubenswrapper[4751]: I0227 16:43:05.444469 4751 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c7b68099-ffa0-4702-a816-f63c3ff1f53d-util\") on node \"crc\" DevicePath \"\"" Feb 27 16:43:05 crc kubenswrapper[4751]: I0227 16:43:05.444481 4751 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c7b68099-ffa0-4702-a816-f63c3ff1f53d-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:43:05 crc kubenswrapper[4751]: I0227 16:43:05.882647 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7" event={"ID":"c7b68099-ffa0-4702-a816-f63c3ff1f53d","Type":"ContainerDied","Data":"fed76cd947b7fc953139cc8c08c69f95b34d13d7d152048730582405e737029a"} Feb 27 16:43:05 crc kubenswrapper[4751]: I0227 16:43:05.882718 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fed76cd947b7fc953139cc8c08c69f95b34d13d7d152048730582405e737029a" Feb 27 16:43:05 crc kubenswrapper[4751]: I0227 16:43:05.882739 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7" Feb 27 16:43:12 crc kubenswrapper[4751]: I0227 16:43:12.983593 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-5db4b47666-dfzxh"] Feb 27 16:43:12 crc kubenswrapper[4751]: E0227 16:43:12.984416 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7b68099-ffa0-4702-a816-f63c3ff1f53d" containerName="util" Feb 27 16:43:12 crc kubenswrapper[4751]: I0227 16:43:12.984437 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7b68099-ffa0-4702-a816-f63c3ff1f53d" containerName="util" Feb 27 16:43:12 crc kubenswrapper[4751]: E0227 16:43:12.984458 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7b68099-ffa0-4702-a816-f63c3ff1f53d" containerName="pull" Feb 27 16:43:12 crc kubenswrapper[4751]: I0227 16:43:12.984467 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7b68099-ffa0-4702-a816-f63c3ff1f53d" containerName="pull" Feb 27 16:43:12 crc kubenswrapper[4751]: E0227 16:43:12.984483 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7b68099-ffa0-4702-a816-f63c3ff1f53d" containerName="extract" Feb 27 16:43:12 crc kubenswrapper[4751]: I0227 16:43:12.984491 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7b68099-ffa0-4702-a816-f63c3ff1f53d" containerName="extract" Feb 27 16:43:12 crc kubenswrapper[4751]: I0227 16:43:12.984624 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7b68099-ffa0-4702-a816-f63c3ff1f53d" containerName="extract" Feb 27 16:43:12 crc kubenswrapper[4751]: I0227 16:43:12.985134 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-5db4b47666-dfzxh" Feb 27 16:43:12 crc kubenswrapper[4751]: I0227 16:43:12.987536 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-nh67d" Feb 27 16:43:13 crc kubenswrapper[4751]: I0227 16:43:13.008184 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xjhs4\" (UniqueName: \"kubernetes.io/projected/8fb412a1-d193-470d-8437-fae88c40c731-kube-api-access-xjhs4\") pod \"openstack-operator-controller-init-5db4b47666-dfzxh\" (UID: \"8fb412a1-d193-470d-8437-fae88c40c731\") " pod="openstack-operators/openstack-operator-controller-init-5db4b47666-dfzxh" Feb 27 16:43:13 crc kubenswrapper[4751]: I0227 16:43:13.008886 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-5db4b47666-dfzxh"] Feb 27 16:43:13 crc kubenswrapper[4751]: I0227 16:43:13.109524 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xjhs4\" (UniqueName: \"kubernetes.io/projected/8fb412a1-d193-470d-8437-fae88c40c731-kube-api-access-xjhs4\") pod \"openstack-operator-controller-init-5db4b47666-dfzxh\" (UID: \"8fb412a1-d193-470d-8437-fae88c40c731\") " pod="openstack-operators/openstack-operator-controller-init-5db4b47666-dfzxh" Feb 27 16:43:13 crc kubenswrapper[4751]: I0227 16:43:13.126771 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xjhs4\" (UniqueName: \"kubernetes.io/projected/8fb412a1-d193-470d-8437-fae88c40c731-kube-api-access-xjhs4\") pod \"openstack-operator-controller-init-5db4b47666-dfzxh\" (UID: \"8fb412a1-d193-470d-8437-fae88c40c731\") " pod="openstack-operators/openstack-operator-controller-init-5db4b47666-dfzxh" Feb 27 16:43:13 crc kubenswrapper[4751]: I0227 16:43:13.300562 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-5db4b47666-dfzxh" Feb 27 16:43:13 crc kubenswrapper[4751]: I0227 16:43:13.572023 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-5db4b47666-dfzxh"] Feb 27 16:43:13 crc kubenswrapper[4751]: I0227 16:43:13.950154 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-5db4b47666-dfzxh" event={"ID":"8fb412a1-d193-470d-8437-fae88c40c731","Type":"ContainerStarted","Data":"508e0d9d900444c89a11720982548e1d28ef5f6e18bc5403508770dcdebb6b73"} Feb 27 16:43:21 crc kubenswrapper[4751]: I0227 16:43:21.005921 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-5db4b47666-dfzxh" event={"ID":"8fb412a1-d193-470d-8437-fae88c40c731","Type":"ContainerStarted","Data":"4c5cb0519b79e5ce6e31a55ddd2669e8a346d5205947158934eddf5720b5dce5"} Feb 27 16:43:21 crc kubenswrapper[4751]: I0227 16:43:21.006711 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-5db4b47666-dfzxh" Feb 27 16:43:21 crc kubenswrapper[4751]: I0227 16:43:21.046029 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-5db4b47666-dfzxh" podStartSLOduration=2.161095148 podStartE2EDuration="9.04600826s" podCreationTimestamp="2026-02-27 16:43:12 +0000 UTC" firstStartedPulling="2026-02-27 16:43:13.580696787 +0000 UTC m=+1155.727711244" lastFinishedPulling="2026-02-27 16:43:20.465609889 +0000 UTC m=+1162.612624356" observedRunningTime="2026-02-27 16:43:21.035560162 +0000 UTC m=+1163.182574609" watchObservedRunningTime="2026-02-27 16:43:21.04600826 +0000 UTC m=+1163.193022707" Feb 27 16:43:33 crc kubenswrapper[4751]: I0227 16:43:33.303775 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-5db4b47666-dfzxh" Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.749959 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-6db6876945-d2xqb"] Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.752139 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-6db6876945-d2xqb" Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.753925 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-55d77d7b5c-6nxxw"] Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.754873 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-55d77d7b5c-6nxxw" Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.756935 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-lv4sj" Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.757391 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-s9b5c" Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.765953 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-6db6876945-d2xqb"] Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.778590 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-5d87c9d997-t6cx4"] Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.779592 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-5d87c9d997-t6cx4" Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.782803 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-v7zjl" Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.800214 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-55d77d7b5c-6nxxw"] Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.807008 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-5d87c9d997-t6cx4"] Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.819864 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-64db6967f8-qgwmb"] Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.820638 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-64db6967f8-qgwmb" Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.822441 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-nj9zh" Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.844959 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-64db6967f8-qgwmb"] Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.883842 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-cf99c678f-cft6c"] Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.885434 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-cf99c678f-cft6c" Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.894934 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-662mq" Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.910328 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgqsk\" (UniqueName: \"kubernetes.io/projected/105de0b5-2fbb-4c56-b286-6466e76e6db6-kube-api-access-fgqsk\") pod \"designate-operator-controller-manager-5d87c9d997-t6cx4\" (UID: \"105de0b5-2fbb-4c56-b286-6466e76e6db6\") " pod="openstack-operators/designate-operator-controller-manager-5d87c9d997-t6cx4" Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.910396 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gk9x\" (UniqueName: \"kubernetes.io/projected/d31044d2-895d-4bb3-8af2-2cdb852fea06-kube-api-access-7gk9x\") pod \"cinder-operator-controller-manager-55d77d7b5c-6nxxw\" (UID: \"d31044d2-895d-4bb3-8af2-2cdb852fea06\") " pod="openstack-operators/cinder-operator-controller-manager-55d77d7b5c-6nxxw" Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.910494 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7z5mz\" (UniqueName: \"kubernetes.io/projected/69ae63f1-4df4-46d1-89b1-4e0c4f60d83f-kube-api-access-7z5mz\") pod \"barbican-operator-controller-manager-6db6876945-d2xqb\" (UID: \"69ae63f1-4df4-46d1-89b1-4e0c4f60d83f\") " pod="openstack-operators/barbican-operator-controller-manager-6db6876945-d2xqb" Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.938464 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-cf99c678f-cft6c"] Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.941892 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-78bc7f9bd9-vtnnn"] Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.942698 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-78bc7f9bd9-vtnnn" Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.947795 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-prq8z" Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.951073 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-f7fcc58b9-74jk2"] Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.951914 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-f7fcc58b9-74jk2" Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.957102 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.957258 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-79d2k" Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.974983 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-78bc7f9bd9-vtnnn"] Feb 27 16:43:50 crc kubenswrapper[4751]: I0227 16:43:50.985455 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-f7fcc58b9-74jk2"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:50.998165 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-545456dc4-9vtcw"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:50.999011 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-545456dc4-9vtcw" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.005634 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-545456dc4-9vtcw"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.007707 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-vdgjd" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.011481 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qfqv\" (UniqueName: \"kubernetes.io/projected/094b19ab-3e9a-4a80-b5c6-177790fd63f0-kube-api-access-7qfqv\") pod \"glance-operator-controller-manager-64db6967f8-qgwmb\" (UID: \"094b19ab-3e9a-4a80-b5c6-177790fd63f0\") " pod="openstack-operators/glance-operator-controller-manager-64db6967f8-qgwmb" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.011538 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7z5mz\" (UniqueName: \"kubernetes.io/projected/69ae63f1-4df4-46d1-89b1-4e0c4f60d83f-kube-api-access-7z5mz\") pod \"barbican-operator-controller-manager-6db6876945-d2xqb\" (UID: \"69ae63f1-4df4-46d1-89b1-4e0c4f60d83f\") " pod="openstack-operators/barbican-operator-controller-manager-6db6876945-d2xqb" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.011584 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktsn8\" (UniqueName: \"kubernetes.io/projected/648389a6-8f01-4a6e-916e-c3b567817015-kube-api-access-ktsn8\") pod \"heat-operator-controller-manager-cf99c678f-cft6c\" (UID: \"648389a6-8f01-4a6e-916e-c3b567817015\") " pod="openstack-operators/heat-operator-controller-manager-cf99c678f-cft6c" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.011637 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgqsk\" (UniqueName: \"kubernetes.io/projected/105de0b5-2fbb-4c56-b286-6466e76e6db6-kube-api-access-fgqsk\") pod \"designate-operator-controller-manager-5d87c9d997-t6cx4\" (UID: \"105de0b5-2fbb-4c56-b286-6466e76e6db6\") " 
pod="openstack-operators/designate-operator-controller-manager-5d87c9d997-t6cx4" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.011658 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gk9x\" (UniqueName: \"kubernetes.io/projected/d31044d2-895d-4bb3-8af2-2cdb852fea06-kube-api-access-7gk9x\") pod \"cinder-operator-controller-manager-55d77d7b5c-6nxxw\" (UID: \"d31044d2-895d-4bb3-8af2-2cdb852fea06\") " pod="openstack-operators/cinder-operator-controller-manager-55d77d7b5c-6nxxw" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.017568 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7c789f89c6-8x8p7"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.018351 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7c789f89c6-8x8p7" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.028741 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-7bl7t" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.043956 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7c789f89c6-8x8p7"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.050815 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-67d996989d-wpjmf"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.051762 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-67d996989d-wpjmf" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.055916 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgqsk\" (UniqueName: \"kubernetes.io/projected/105de0b5-2fbb-4c56-b286-6466e76e6db6-kube-api-access-fgqsk\") pod \"designate-operator-controller-manager-5d87c9d997-t6cx4\" (UID: \"105de0b5-2fbb-4c56-b286-6466e76e6db6\") " pod="openstack-operators/designate-operator-controller-manager-5d87c9d997-t6cx4" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.056010 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-bf85t" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.057367 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7z5mz\" (UniqueName: \"kubernetes.io/projected/69ae63f1-4df4-46d1-89b1-4e0c4f60d83f-kube-api-access-7z5mz\") pod \"barbican-operator-controller-manager-6db6876945-d2xqb\" (UID: \"69ae63f1-4df4-46d1-89b1-4e0c4f60d83f\") " pod="openstack-operators/barbican-operator-controller-manager-6db6876945-d2xqb" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.057670 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gk9x\" (UniqueName: \"kubernetes.io/projected/d31044d2-895d-4bb3-8af2-2cdb852fea06-kube-api-access-7gk9x\") pod \"cinder-operator-controller-manager-55d77d7b5c-6nxxw\" (UID: \"d31044d2-895d-4bb3-8af2-2cdb852fea06\") " pod="openstack-operators/cinder-operator-controller-manager-55d77d7b5c-6nxxw" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.058547 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-7b6bfb6475-jlzhk"] Feb 27 16:43:51 crc 
kubenswrapper[4751]: I0227 16:43:51.059323 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-7b6bfb6475-jlzhk" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.063236 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-kdlnf" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.067460 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-67d996989d-wpjmf"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.078650 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-6db6876945-d2xqb" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.088704 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-54688575f-gltv4"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.089719 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-55d77d7b5c-6nxxw" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.089764 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-54688575f-gltv4" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.096146 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-4bzhp" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.105168 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-54688575f-gltv4"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.105482 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-5d87c9d997-t6cx4" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.117295 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-7b6bfb6475-jlzhk"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.117359 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-74b6b5dc96-dqshb"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.118229 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnn22\" (UniqueName: \"kubernetes.io/projected/9126808a-112b-45e2-82fc-9f71b9ac3545-kube-api-access-mnn22\") pod \"horizon-operator-controller-manager-78bc7f9bd9-vtnnn\" (UID: \"9126808a-112b-45e2-82fc-9f71b9ac3545\") " pod="openstack-operators/horizon-operator-controller-manager-78bc7f9bd9-vtnnn" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.118290 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qfqv\" (UniqueName: \"kubernetes.io/projected/094b19ab-3e9a-4a80-b5c6-177790fd63f0-kube-api-access-7qfqv\") pod \"glance-operator-controller-manager-64db6967f8-qgwmb\" (UID: \"094b19ab-3e9a-4a80-b5c6-177790fd63f0\") " pod="openstack-operators/glance-operator-controller-manager-64db6967f8-qgwmb" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.118315 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7xlx\" (UniqueName: \"kubernetes.io/projected/d466fdb9-e7bd-4ea1-8e4d-0a260ba3f0a0-kube-api-access-k7xlx\") pod \"ironic-operator-controller-manager-545456dc4-9vtcw\" (UID: \"d466fdb9-e7bd-4ea1-8e4d-0a260ba3f0a0\") " pod="openstack-operators/ironic-operator-controller-manager-545456dc4-9vtcw" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.118361 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktsn8\" (UniqueName: \"kubernetes.io/projected/648389a6-8f01-4a6e-916e-c3b567817015-kube-api-access-ktsn8\") pod \"heat-operator-controller-manager-cf99c678f-cft6c\" (UID: \"648389a6-8f01-4a6e-916e-c3b567817015\") " pod="openstack-operators/heat-operator-controller-manager-cf99c678f-cft6c" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.118415 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/367c4281-7780-478a-ae73-263cf73aa15e-cert\") pod \"infra-operator-controller-manager-f7fcc58b9-74jk2\" (UID: \"367c4281-7780-478a-ae73-263cf73aa15e\") " pod="openstack-operators/infra-operator-controller-manager-f7fcc58b9-74jk2" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.118450 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gwztv\" (UniqueName: \"kubernetes.io/projected/eebbb996-efc8-4dd2-9840-da330af0ec75-kube-api-access-gwztv\") pod \"keystone-operator-controller-manager-7c789f89c6-8x8p7\" (UID: \"eebbb996-efc8-4dd2-9840-da330af0ec75\") " pod="openstack-operators/keystone-operator-controller-manager-7c789f89c6-8x8p7" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.118472 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bv95m\" (UniqueName: 
\"kubernetes.io/projected/367c4281-7780-478a-ae73-263cf73aa15e-kube-api-access-bv95m\") pod \"infra-operator-controller-manager-f7fcc58b9-74jk2\" (UID: \"367c4281-7780-478a-ae73-263cf73aa15e\") " pod="openstack-operators/infra-operator-controller-manager-f7fcc58b9-74jk2" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.118245 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-74b6b5dc96-dqshb" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.123808 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-9m96w" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.147962 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-5d86c7ddb7-p4qmg"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.148797 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-5d86c7ddb7-p4qmg" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.149338 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktsn8\" (UniqueName: \"kubernetes.io/projected/648389a6-8f01-4a6e-916e-c3b567817015-kube-api-access-ktsn8\") pod \"heat-operator-controller-manager-cf99c678f-cft6c\" (UID: \"648389a6-8f01-4a6e-916e-c3b567817015\") " pod="openstack-operators/heat-operator-controller-manager-cf99c678f-cft6c" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.151331 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-2kc5w" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.163300 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7qfqv\" (UniqueName: \"kubernetes.io/projected/094b19ab-3e9a-4a80-b5c6-177790fd63f0-kube-api-access-7qfqv\") pod \"glance-operator-controller-manager-64db6967f8-qgwmb\" (UID: \"094b19ab-3e9a-4a80-b5c6-177790fd63f0\") " pod="openstack-operators/glance-operator-controller-manager-64db6967f8-qgwmb" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.163839 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-74b6b5dc96-dqshb"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.173458 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-5d86c7ddb7-p4qmg"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.187613 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.188709 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.192314 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-ntsbp" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.192531 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.199420 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.216185 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-75684d597f-vzmw2"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.216945 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-75684d597f-vzmw2" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.220364 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-422nt" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.223626 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-648564c9fc-qbhxg"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.224494 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-648564c9fc-qbhxg" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.225570 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/367c4281-7780-478a-ae73-263cf73aa15e-cert\") pod \"infra-operator-controller-manager-f7fcc58b9-74jk2\" (UID: \"367c4281-7780-478a-ae73-263cf73aa15e\") " pod="openstack-operators/infra-operator-controller-manager-f7fcc58b9-74jk2" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.225645 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gwztv\" (UniqueName: \"kubernetes.io/projected/eebbb996-efc8-4dd2-9840-da330af0ec75-kube-api-access-gwztv\") pod \"keystone-operator-controller-manager-7c789f89c6-8x8p7\" (UID: \"eebbb996-efc8-4dd2-9840-da330af0ec75\") " pod="openstack-operators/keystone-operator-controller-manager-7c789f89c6-8x8p7" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.225671 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7j66\" (UniqueName: \"kubernetes.io/projected/789bdce1-90fe-43ec-89a2-4f0669899b1d-kube-api-access-n7j66\") pod \"neutron-operator-controller-manager-54688575f-gltv4\" (UID: \"789bdce1-90fe-43ec-89a2-4f0669899b1d\") " pod="openstack-operators/neutron-operator-controller-manager-54688575f-gltv4" Feb 27 16:43:51 crc kubenswrapper[4751]: E0227 16:43:51.225770 4751 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Feb 27 16:43:51 crc kubenswrapper[4751]: E0227 16:43:51.225810 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/367c4281-7780-478a-ae73-263cf73aa15e-cert 
podName:367c4281-7780-478a-ae73-263cf73aa15e nodeName:}" failed. No retries permitted until 2026-02-27 16:43:51.725794673 +0000 UTC m=+1193.872809120 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/367c4281-7780-478a-ae73-263cf73aa15e-cert") pod "infra-operator-controller-manager-f7fcc58b9-74jk2" (UID: "367c4281-7780-478a-ae73-263cf73aa15e") : secret "infra-operator-webhook-server-cert" not found Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.226042 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bv95m\" (UniqueName: \"kubernetes.io/projected/367c4281-7780-478a-ae73-263cf73aa15e-kube-api-access-bv95m\") pod \"infra-operator-controller-manager-f7fcc58b9-74jk2\" (UID: \"367c4281-7780-478a-ae73-263cf73aa15e\") " pod="openstack-operators/infra-operator-controller-manager-f7fcc58b9-74jk2" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.226190 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czhls\" (UniqueName: \"kubernetes.io/projected/26f8413e-34ed-4e45-8eec-e06ba73d1a8b-kube-api-access-czhls\") pod \"mariadb-operator-controller-manager-7b6bfb6475-jlzhk\" (UID: \"26f8413e-34ed-4e45-8eec-e06ba73d1a8b\") " pod="openstack-operators/mariadb-operator-controller-manager-7b6bfb6475-jlzhk" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.226269 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnn22\" (UniqueName: \"kubernetes.io/projected/9126808a-112b-45e2-82fc-9f71b9ac3545-kube-api-access-mnn22\") pod \"horizon-operator-controller-manager-78bc7f9bd9-vtnnn\" (UID: \"9126808a-112b-45e2-82fc-9f71b9ac3545\") " pod="openstack-operators/horizon-operator-controller-manager-78bc7f9bd9-vtnnn" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.226333 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-cf99c678f-cft6c" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.226742 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2l9l2\" (UniqueName: \"kubernetes.io/projected/eac3a3e7-7a56-4774-8a2f-2f6998e678c1-kube-api-access-2l9l2\") pod \"nova-operator-controller-manager-74b6b5dc96-dqshb\" (UID: \"eac3a3e7-7a56-4774-8a2f-2f6998e678c1\") " pod="openstack-operators/nova-operator-controller-manager-74b6b5dc96-dqshb" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.226771 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7xlx\" (UniqueName: \"kubernetes.io/projected/d466fdb9-e7bd-4ea1-8e4d-0a260ba3f0a0-kube-api-access-k7xlx\") pod \"ironic-operator-controller-manager-545456dc4-9vtcw\" (UID: \"d466fdb9-e7bd-4ea1-8e4d-0a260ba3f0a0\") " pod="openstack-operators/ironic-operator-controller-manager-545456dc4-9vtcw" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.226793 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qlsdj\" (UniqueName: \"kubernetes.io/projected/4cdc0be8-19db-4cab-a32a-11848fab949d-kube-api-access-qlsdj\") pod \"manila-operator-controller-manager-67d996989d-wpjmf\" (UID: \"4cdc0be8-19db-4cab-a32a-11848fab949d\") " pod="openstack-operators/manila-operator-controller-manager-67d996989d-wpjmf" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.231736 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-xb9f2" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.246847 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-648564c9fc-qbhxg"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.252126 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gwztv\" (UniqueName: \"kubernetes.io/projected/eebbb996-efc8-4dd2-9840-da330af0ec75-kube-api-access-gwztv\") pod \"keystone-operator-controller-manager-7c789f89c6-8x8p7\" (UID: \"eebbb996-efc8-4dd2-9840-da330af0ec75\") " pod="openstack-operators/keystone-operator-controller-manager-7c789f89c6-8x8p7" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.252682 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7xlx\" (UniqueName: \"kubernetes.io/projected/d466fdb9-e7bd-4ea1-8e4d-0a260ba3f0a0-kube-api-access-k7xlx\") pod \"ironic-operator-controller-manager-545456dc4-9vtcw\" (UID: \"d466fdb9-e7bd-4ea1-8e4d-0a260ba3f0a0\") " pod="openstack-operators/ironic-operator-controller-manager-545456dc4-9vtcw" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.257352 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bv95m\" (UniqueName: \"kubernetes.io/projected/367c4281-7780-478a-ae73-263cf73aa15e-kube-api-access-bv95m\") pod \"infra-operator-controller-manager-f7fcc58b9-74jk2\" (UID: \"367c4281-7780-478a-ae73-263cf73aa15e\") " pod="openstack-operators/infra-operator-controller-manager-f7fcc58b9-74jk2" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.259611 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-75684d597f-vzmw2"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.266123 4751 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnn22\" (UniqueName: \"kubernetes.io/projected/9126808a-112b-45e2-82fc-9f71b9ac3545-kube-api-access-mnn22\") pod \"horizon-operator-controller-manager-78bc7f9bd9-vtnnn\" (UID: \"9126808a-112b-45e2-82fc-9f71b9ac3545\") " pod="openstack-operators/horizon-operator-controller-manager-78bc7f9bd9-vtnnn" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.275944 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-9b9ff9f4d-4zzmq"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.313205 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-9b9ff9f4d-4zzmq" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.319766 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-4x5q7" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.330303 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2l9l2\" (UniqueName: \"kubernetes.io/projected/eac3a3e7-7a56-4774-8a2f-2f6998e678c1-kube-api-access-2l9l2\") pod \"nova-operator-controller-manager-74b6b5dc96-dqshb\" (UID: \"eac3a3e7-7a56-4774-8a2f-2f6998e678c1\") " pod="openstack-operators/nova-operator-controller-manager-74b6b5dc96-dqshb" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.330370 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlsdj\" (UniqueName: \"kubernetes.io/projected/4cdc0be8-19db-4cab-a32a-11848fab949d-kube-api-access-qlsdj\") pod \"manila-operator-controller-manager-67d996989d-wpjmf\" (UID: \"4cdc0be8-19db-4cab-a32a-11848fab949d\") " pod="openstack-operators/manila-operator-controller-manager-67d996989d-wpjmf" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.330414 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nz4q6\" (UniqueName: \"kubernetes.io/projected/6e95d387-339b-4ee7-b244-a1d82cb9f14e-kube-api-access-nz4q6\") pod \"ovn-operator-controller-manager-75684d597f-vzmw2\" (UID: \"6e95d387-339b-4ee7-b244-a1d82cb9f14e\") " pod="openstack-operators/ovn-operator-controller-manager-75684d597f-vzmw2" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.330467 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b4054232-d1c6-469a-ab62-3bc130b5535b-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv\" (UID: \"b4054232-d1c6-469a-ab62-3bc130b5535b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.330502 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d5h7f\" (UniqueName: \"kubernetes.io/projected/22dbe1eb-ede5-439e-b447-c79f5051a22d-kube-api-access-d5h7f\") pod \"octavia-operator-controller-manager-5d86c7ddb7-p4qmg\" (UID: \"22dbe1eb-ede5-439e-b447-c79f5051a22d\") " pod="openstack-operators/octavia-operator-controller-manager-5d86c7ddb7-p4qmg" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.330560 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lm4g2\" (UniqueName: 
\"kubernetes.io/projected/b4054232-d1c6-469a-ab62-3bc130b5535b-kube-api-access-lm4g2\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv\" (UID: \"b4054232-d1c6-469a-ab62-3bc130b5535b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.330601 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7j66\" (UniqueName: \"kubernetes.io/projected/789bdce1-90fe-43ec-89a2-4f0669899b1d-kube-api-access-n7j66\") pod \"neutron-operator-controller-manager-54688575f-gltv4\" (UID: \"789bdce1-90fe-43ec-89a2-4f0669899b1d\") " pod="openstack-operators/neutron-operator-controller-manager-54688575f-gltv4" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.330624 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqq2x\" (UniqueName: \"kubernetes.io/projected/4a8b3476-6579-4458-ac2b-ba9795eaa9eb-kube-api-access-nqq2x\") pod \"placement-operator-controller-manager-648564c9fc-qbhxg\" (UID: \"4a8b3476-6579-4458-ac2b-ba9795eaa9eb\") " pod="openstack-operators/placement-operator-controller-manager-648564c9fc-qbhxg" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.330645 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czhls\" (UniqueName: \"kubernetes.io/projected/26f8413e-34ed-4e45-8eec-e06ba73d1a8b-kube-api-access-czhls\") pod \"mariadb-operator-controller-manager-7b6bfb6475-jlzhk\" (UID: \"26f8413e-34ed-4e45-8eec-e06ba73d1a8b\") " pod="openstack-operators/mariadb-operator-controller-manager-7b6bfb6475-jlzhk" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.369778 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qlsdj\" (UniqueName: \"kubernetes.io/projected/4cdc0be8-19db-4cab-a32a-11848fab949d-kube-api-access-qlsdj\") pod \"manila-operator-controller-manager-67d996989d-wpjmf\" (UID: \"4cdc0be8-19db-4cab-a32a-11848fab949d\") " pod="openstack-operators/manila-operator-controller-manager-67d996989d-wpjmf" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.369852 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czhls\" (UniqueName: \"kubernetes.io/projected/26f8413e-34ed-4e45-8eec-e06ba73d1a8b-kube-api-access-czhls\") pod \"mariadb-operator-controller-manager-7b6bfb6475-jlzhk\" (UID: \"26f8413e-34ed-4e45-8eec-e06ba73d1a8b\") " pod="openstack-operators/mariadb-operator-controller-manager-7b6bfb6475-jlzhk" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.370344 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2l9l2\" (UniqueName: \"kubernetes.io/projected/eac3a3e7-7a56-4774-8a2f-2f6998e678c1-kube-api-access-2l9l2\") pod \"nova-operator-controller-manager-74b6b5dc96-dqshb\" (UID: \"eac3a3e7-7a56-4774-8a2f-2f6998e678c1\") " pod="openstack-operators/nova-operator-controller-manager-74b6b5dc96-dqshb" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.371473 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-9b9ff9f4d-4zzmq"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.377754 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-545456dc4-9vtcw" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.380425 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5fdb694969-w8rl5"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.381828 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-5fdb694969-w8rl5" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.382177 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7j66\" (UniqueName: \"kubernetes.io/projected/789bdce1-90fe-43ec-89a2-4f0669899b1d-kube-api-access-n7j66\") pod \"neutron-operator-controller-manager-54688575f-gltv4\" (UID: \"789bdce1-90fe-43ec-89a2-4f0669899b1d\") " pod="openstack-operators/neutron-operator-controller-manager-54688575f-gltv4" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.384393 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-b4wj9" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.401457 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5fdb694969-w8rl5"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.401918 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7c789f89c6-8x8p7" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.432983 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nz4q6\" (UniqueName: \"kubernetes.io/projected/6e95d387-339b-4ee7-b244-a1d82cb9f14e-kube-api-access-nz4q6\") pod \"ovn-operator-controller-manager-75684d597f-vzmw2\" (UID: \"6e95d387-339b-4ee7-b244-a1d82cb9f14e\") " pod="openstack-operators/ovn-operator-controller-manager-75684d597f-vzmw2" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.433065 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b4054232-d1c6-469a-ab62-3bc130b5535b-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv\" (UID: \"b4054232-d1c6-469a-ab62-3bc130b5535b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.433104 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d5h7f\" (UniqueName: \"kubernetes.io/projected/22dbe1eb-ede5-439e-b447-c79f5051a22d-kube-api-access-d5h7f\") pod \"octavia-operator-controller-manager-5d86c7ddb7-p4qmg\" (UID: \"22dbe1eb-ede5-439e-b447-c79f5051a22d\") " pod="openstack-operators/octavia-operator-controller-manager-5d86c7ddb7-p4qmg" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.433156 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfhg2\" (UniqueName: \"kubernetes.io/projected/a0ef8d16-b0cf-4cb0-8e47-b1c10a3a13d7-kube-api-access-vfhg2\") pod \"swift-operator-controller-manager-9b9ff9f4d-4zzmq\" (UID: \"a0ef8d16-b0cf-4cb0-8e47-b1c10a3a13d7\") " pod="openstack-operators/swift-operator-controller-manager-9b9ff9f4d-4zzmq" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.433211 4751 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-lm4g2\" (UniqueName: \"kubernetes.io/projected/b4054232-d1c6-469a-ab62-3bc130b5535b-kube-api-access-lm4g2\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv\" (UID: \"b4054232-d1c6-469a-ab62-3bc130b5535b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.433259 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqq2x\" (UniqueName: \"kubernetes.io/projected/4a8b3476-6579-4458-ac2b-ba9795eaa9eb-kube-api-access-nqq2x\") pod \"placement-operator-controller-manager-648564c9fc-qbhxg\" (UID: \"4a8b3476-6579-4458-ac2b-ba9795eaa9eb\") " pod="openstack-operators/placement-operator-controller-manager-648564c9fc-qbhxg" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.435486 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-55b5ff4dbb-tcndr"] Feb 27 16:43:51 crc kubenswrapper[4751]: E0227 16:43:51.436229 4751 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 27 16:43:51 crc kubenswrapper[4751]: E0227 16:43:51.436282 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b4054232-d1c6-469a-ab62-3bc130b5535b-cert podName:b4054232-d1c6-469a-ab62-3bc130b5535b nodeName:}" failed. No retries permitted until 2026-02-27 16:43:51.936267253 +0000 UTC m=+1194.083281690 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b4054232-d1c6-469a-ab62-3bc130b5535b-cert") pod "openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" (UID: "b4054232-d1c6-469a-ab62-3bc130b5535b") : secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.436638 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-55b5ff4dbb-tcndr" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.444564 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-bkwt6" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.445008 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-64db6967f8-qgwmb" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.445012 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-55b5ff4dbb-tcndr"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.464859 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lm4g2\" (UniqueName: \"kubernetes.io/projected/b4054232-d1c6-469a-ab62-3bc130b5535b-kube-api-access-lm4g2\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv\" (UID: \"b4054232-d1c6-469a-ab62-3bc130b5535b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.469575 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqq2x\" (UniqueName: \"kubernetes.io/projected/4a8b3476-6579-4458-ac2b-ba9795eaa9eb-kube-api-access-nqq2x\") pod \"placement-operator-controller-manager-648564c9fc-qbhxg\" (UID: \"4a8b3476-6579-4458-ac2b-ba9795eaa9eb\") " pod="openstack-operators/placement-operator-controller-manager-648564c9fc-qbhxg" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.475167 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-bccc79885-6bqxl"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.476443 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-bccc79885-6bqxl" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.477293 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d5h7f\" (UniqueName: \"kubernetes.io/projected/22dbe1eb-ede5-439e-b447-c79f5051a22d-kube-api-access-d5h7f\") pod \"octavia-operator-controller-manager-5d86c7ddb7-p4qmg\" (UID: \"22dbe1eb-ede5-439e-b447-c79f5051a22d\") " pod="openstack-operators/octavia-operator-controller-manager-5d86c7ddb7-p4qmg" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.483841 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-mlq29" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.484053 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nz4q6\" (UniqueName: \"kubernetes.io/projected/6e95d387-339b-4ee7-b244-a1d82cb9f14e-kube-api-access-nz4q6\") pod \"ovn-operator-controller-manager-75684d597f-vzmw2\" (UID: \"6e95d387-339b-4ee7-b244-a1d82cb9f14e\") " pod="openstack-operators/ovn-operator-controller-manager-75684d597f-vzmw2" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.494623 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-bccc79885-6bqxl"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.504505 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-67d996989d-wpjmf" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.515753 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.516890 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-7b6bfb6475-jlzhk" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.517348 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.520294 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.520538 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.521081 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-xsblp" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.538594 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87gr8\" (UniqueName: \"kubernetes.io/projected/00579c6f-e25e-4b49-b43d-50547230a24d-kube-api-access-87gr8\") pod \"telemetry-operator-controller-manager-5fdb694969-w8rl5\" (UID: \"00579c6f-e25e-4b49-b43d-50547230a24d\") " pod="openstack-operators/telemetry-operator-controller-manager-5fdb694969-w8rl5" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.538692 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzwmk\" (UniqueName: \"kubernetes.io/projected/91067468-8654-4bfd-b921-15679cf507c9-kube-api-access-lzwmk\") pod \"test-operator-controller-manager-55b5ff4dbb-tcndr\" (UID: \"91067468-8654-4bfd-b921-15679cf507c9\") " pod="openstack-operators/test-operator-controller-manager-55b5ff4dbb-tcndr" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.538720 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vfhg2\" (UniqueName: \"kubernetes.io/projected/a0ef8d16-b0cf-4cb0-8e47-b1c10a3a13d7-kube-api-access-vfhg2\") pod \"swift-operator-controller-manager-9b9ff9f4d-4zzmq\" (UID: \"a0ef8d16-b0cf-4cb0-8e47-b1c10a3a13d7\") " pod="openstack-operators/swift-operator-controller-manager-9b9ff9f4d-4zzmq" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.540122 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-54688575f-gltv4" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.568702 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-74b6b5dc96-dqshb" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.569276 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-78bc7f9bd9-vtnnn" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.570343 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-75684d597f-vzmw2" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.572938 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.580855 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vfhg2\" (UniqueName: \"kubernetes.io/projected/a0ef8d16-b0cf-4cb0-8e47-b1c10a3a13d7-kube-api-access-vfhg2\") pod \"swift-operator-controller-manager-9b9ff9f4d-4zzmq\" (UID: \"a0ef8d16-b0cf-4cb0-8e47-b1c10a3a13d7\") " pod="openstack-operators/swift-operator-controller-manager-9b9ff9f4d-4zzmq" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.594607 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-648564c9fc-qbhxg" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.626477 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kv5gk"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.630297 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-9b9ff9f4d-4zzmq" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.627394 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kv5gk" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.642024 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-metrics-certs\") pod \"openstack-operator-controller-manager-789bbcd94f-grbwc\" (UID: \"0b8c1cf8-d3b0-4220-bbc5-81ccf3830782\") " pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.642070 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzwmk\" (UniqueName: \"kubernetes.io/projected/91067468-8654-4bfd-b921-15679cf507c9-kube-api-access-lzwmk\") pod \"test-operator-controller-manager-55b5ff4dbb-tcndr\" (UID: \"91067468-8654-4bfd-b921-15679cf507c9\") " pod="openstack-operators/test-operator-controller-manager-55b5ff4dbb-tcndr" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.642158 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-87gr8\" (UniqueName: \"kubernetes.io/projected/00579c6f-e25e-4b49-b43d-50547230a24d-kube-api-access-87gr8\") pod \"telemetry-operator-controller-manager-5fdb694969-w8rl5\" (UID: \"00579c6f-e25e-4b49-b43d-50547230a24d\") " pod="openstack-operators/telemetry-operator-controller-manager-5fdb694969-w8rl5" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.642182 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-webhook-certs\") pod \"openstack-operator-controller-manager-789bbcd94f-grbwc\" (UID: \"0b8c1cf8-d3b0-4220-bbc5-81ccf3830782\") " pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.642212 4751 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2x6d\" (UniqueName: \"kubernetes.io/projected/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-kube-api-access-l2x6d\") pod \"openstack-operator-controller-manager-789bbcd94f-grbwc\" (UID: \"0b8c1cf8-d3b0-4220-bbc5-81ccf3830782\") " pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.642246 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rdth\" (UniqueName: \"kubernetes.io/projected/5bba51b5-db12-42b8-80bd-c38ff3d7bfd4-kube-api-access-4rdth\") pod \"watcher-operator-controller-manager-bccc79885-6bqxl\" (UID: \"5bba51b5-db12-42b8-80bd-c38ff3d7bfd4\") " pod="openstack-operators/watcher-operator-controller-manager-bccc79885-6bqxl" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.655312 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kv5gk"] Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.659863 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-2n4gb" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.681089 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzwmk\" (UniqueName: \"kubernetes.io/projected/91067468-8654-4bfd-b921-15679cf507c9-kube-api-access-lzwmk\") pod \"test-operator-controller-manager-55b5ff4dbb-tcndr\" (UID: \"91067468-8654-4bfd-b921-15679cf507c9\") " pod="openstack-operators/test-operator-controller-manager-55b5ff4dbb-tcndr" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.682386 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-87gr8\" (UniqueName: \"kubernetes.io/projected/00579c6f-e25e-4b49-b43d-50547230a24d-kube-api-access-87gr8\") pod \"telemetry-operator-controller-manager-5fdb694969-w8rl5\" (UID: \"00579c6f-e25e-4b49-b43d-50547230a24d\") " pod="openstack-operators/telemetry-operator-controller-manager-5fdb694969-w8rl5" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.719267 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-55b5ff4dbb-tcndr" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.735433 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-5d86c7ddb7-p4qmg" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.744046 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-webhook-certs\") pod \"openstack-operator-controller-manager-789bbcd94f-grbwc\" (UID: \"0b8c1cf8-d3b0-4220-bbc5-81ccf3830782\") " pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.744099 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2x6d\" (UniqueName: \"kubernetes.io/projected/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-kube-api-access-l2x6d\") pod \"openstack-operator-controller-manager-789bbcd94f-grbwc\" (UID: \"0b8c1cf8-d3b0-4220-bbc5-81ccf3830782\") " pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.744138 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rdth\" (UniqueName: \"kubernetes.io/projected/5bba51b5-db12-42b8-80bd-c38ff3d7bfd4-kube-api-access-4rdth\") pod \"watcher-operator-controller-manager-bccc79885-6bqxl\" (UID: \"5bba51b5-db12-42b8-80bd-c38ff3d7bfd4\") " pod="openstack-operators/watcher-operator-controller-manager-bccc79885-6bqxl" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.744169 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jcvx\" (UniqueName: \"kubernetes.io/projected/d570bae3-0595-480f-bebc-80d86a0618d3-kube-api-access-8jcvx\") pod \"rabbitmq-cluster-operator-manager-668c99d594-kv5gk\" (UID: \"d570bae3-0595-480f-bebc-80d86a0618d3\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kv5gk" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.744191 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-metrics-certs\") pod \"openstack-operator-controller-manager-789bbcd94f-grbwc\" (UID: \"0b8c1cf8-d3b0-4220-bbc5-81ccf3830782\") " pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.744229 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/367c4281-7780-478a-ae73-263cf73aa15e-cert\") pod \"infra-operator-controller-manager-f7fcc58b9-74jk2\" (UID: \"367c4281-7780-478a-ae73-263cf73aa15e\") " pod="openstack-operators/infra-operator-controller-manager-f7fcc58b9-74jk2" Feb 27 16:43:51 crc kubenswrapper[4751]: E0227 16:43:51.744940 4751 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Feb 27 16:43:51 crc kubenswrapper[4751]: E0227 16:43:51.744980 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-webhook-certs podName:0b8c1cf8-d3b0-4220-bbc5-81ccf3830782 nodeName:}" failed. No retries permitted until 2026-02-27 16:43:52.244965603 +0000 UTC m=+1194.391980050 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-webhook-certs") pod "openstack-operator-controller-manager-789bbcd94f-grbwc" (UID: "0b8c1cf8-d3b0-4220-bbc5-81ccf3830782") : secret "webhook-server-cert" not found Feb 27 16:43:51 crc kubenswrapper[4751]: E0227 16:43:51.745205 4751 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Feb 27 16:43:51 crc kubenswrapper[4751]: E0227 16:43:51.745279 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-metrics-certs podName:0b8c1cf8-d3b0-4220-bbc5-81ccf3830782 nodeName:}" failed. No retries permitted until 2026-02-27 16:43:52.245259071 +0000 UTC m=+1194.392273518 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-metrics-certs") pod "openstack-operator-controller-manager-789bbcd94f-grbwc" (UID: "0b8c1cf8-d3b0-4220-bbc5-81ccf3830782") : secret "metrics-server-cert" not found Feb 27 16:43:51 crc kubenswrapper[4751]: E0227 16:43:51.745458 4751 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Feb 27 16:43:51 crc kubenswrapper[4751]: E0227 16:43:51.745485 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/367c4281-7780-478a-ae73-263cf73aa15e-cert podName:367c4281-7780-478a-ae73-263cf73aa15e nodeName:}" failed. No retries permitted until 2026-02-27 16:43:52.745476327 +0000 UTC m=+1194.892490774 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/367c4281-7780-478a-ae73-263cf73aa15e-cert") pod "infra-operator-controller-manager-f7fcc58b9-74jk2" (UID: "367c4281-7780-478a-ae73-263cf73aa15e") : secret "infra-operator-webhook-server-cert" not found Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.809350 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2x6d\" (UniqueName: \"kubernetes.io/projected/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-kube-api-access-l2x6d\") pod \"openstack-operator-controller-manager-789bbcd94f-grbwc\" (UID: \"0b8c1cf8-d3b0-4220-bbc5-81ccf3830782\") " pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.817187 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rdth\" (UniqueName: \"kubernetes.io/projected/5bba51b5-db12-42b8-80bd-c38ff3d7bfd4-kube-api-access-4rdth\") pod \"watcher-operator-controller-manager-bccc79885-6bqxl\" (UID: \"5bba51b5-db12-42b8-80bd-c38ff3d7bfd4\") " pod="openstack-operators/watcher-operator-controller-manager-bccc79885-6bqxl" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.847475 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jcvx\" (UniqueName: \"kubernetes.io/projected/d570bae3-0595-480f-bebc-80d86a0618d3-kube-api-access-8jcvx\") pod \"rabbitmq-cluster-operator-manager-668c99d594-kv5gk\" (UID: \"d570bae3-0595-480f-bebc-80d86a0618d3\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kv5gk" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.881932 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jcvx\" 
(UniqueName: \"kubernetes.io/projected/d570bae3-0595-480f-bebc-80d86a0618d3-kube-api-access-8jcvx\") pod \"rabbitmq-cluster-operator-manager-668c99d594-kv5gk\" (UID: \"d570bae3-0595-480f-bebc-80d86a0618d3\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kv5gk" Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.949225 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b4054232-d1c6-469a-ab62-3bc130b5535b-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv\" (UID: \"b4054232-d1c6-469a-ab62-3bc130b5535b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" Feb 27 16:43:51 crc kubenswrapper[4751]: E0227 16:43:51.949601 4751 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 27 16:43:51 crc kubenswrapper[4751]: E0227 16:43:51.949722 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b4054232-d1c6-469a-ab62-3bc130b5535b-cert podName:b4054232-d1c6-469a-ab62-3bc130b5535b nodeName:}" failed. No retries permitted until 2026-02-27 16:43:52.949641929 +0000 UTC m=+1195.096656376 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b4054232-d1c6-469a-ab62-3bc130b5535b-cert") pod "openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" (UID: "b4054232-d1c6-469a-ab62-3bc130b5535b") : secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 27 16:43:51 crc kubenswrapper[4751]: I0227 16:43:51.965869 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-5fdb694969-w8rl5" Feb 27 16:43:52 crc kubenswrapper[4751]: I0227 16:43:52.042041 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-bccc79885-6bqxl" Feb 27 16:43:52 crc kubenswrapper[4751]: I0227 16:43:52.051631 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kv5gk" Feb 27 16:43:52 crc kubenswrapper[4751]: I0227 16:43:52.206682 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-5d87c9d997-t6cx4"] Feb 27 16:43:52 crc kubenswrapper[4751]: I0227 16:43:52.254275 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-webhook-certs\") pod \"openstack-operator-controller-manager-789bbcd94f-grbwc\" (UID: \"0b8c1cf8-d3b0-4220-bbc5-81ccf3830782\") " pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:43:52 crc kubenswrapper[4751]: I0227 16:43:52.254363 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-metrics-certs\") pod \"openstack-operator-controller-manager-789bbcd94f-grbwc\" (UID: \"0b8c1cf8-d3b0-4220-bbc5-81ccf3830782\") " pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:43:52 crc kubenswrapper[4751]: E0227 16:43:52.254513 4751 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Feb 27 16:43:52 crc kubenswrapper[4751]: E0227 16:43:52.254572 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-metrics-certs podName:0b8c1cf8-d3b0-4220-bbc5-81ccf3830782 nodeName:}" failed. No retries permitted until 2026-02-27 16:43:53.254557288 +0000 UTC m=+1195.401571735 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-metrics-certs") pod "openstack-operator-controller-manager-789bbcd94f-grbwc" (UID: "0b8c1cf8-d3b0-4220-bbc5-81ccf3830782") : secret "metrics-server-cert" not found Feb 27 16:43:52 crc kubenswrapper[4751]: E0227 16:43:52.254717 4751 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Feb 27 16:43:52 crc kubenswrapper[4751]: E0227 16:43:52.254825 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-webhook-certs podName:0b8c1cf8-d3b0-4220-bbc5-81ccf3830782 nodeName:}" failed. No retries permitted until 2026-02-27 16:43:53.254798224 +0000 UTC m=+1195.401812671 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-webhook-certs") pod "openstack-operator-controller-manager-789bbcd94f-grbwc" (UID: "0b8c1cf8-d3b0-4220-bbc5-81ccf3830782") : secret "webhook-server-cert" not found Feb 27 16:43:52 crc kubenswrapper[4751]: I0227 16:43:52.386337 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-6db6876945-d2xqb"] Feb 27 16:43:52 crc kubenswrapper[4751]: I0227 16:43:52.762449 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/367c4281-7780-478a-ae73-263cf73aa15e-cert\") pod \"infra-operator-controller-manager-f7fcc58b9-74jk2\" (UID: \"367c4281-7780-478a-ae73-263cf73aa15e\") " pod="openstack-operators/infra-operator-controller-manager-f7fcc58b9-74jk2" Feb 27 16:43:52 crc kubenswrapper[4751]: E0227 16:43:52.762621 4751 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Feb 27 16:43:52 crc kubenswrapper[4751]: E0227 16:43:52.762704 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/367c4281-7780-478a-ae73-263cf73aa15e-cert podName:367c4281-7780-478a-ae73-263cf73aa15e nodeName:}" failed. No retries permitted until 2026-02-27 16:43:54.762682403 +0000 UTC m=+1196.909696850 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/367c4281-7780-478a-ae73-263cf73aa15e-cert") pod "infra-operator-controller-manager-f7fcc58b9-74jk2" (UID: "367c4281-7780-478a-ae73-263cf73aa15e") : secret "infra-operator-webhook-server-cert" not found Feb 27 16:43:52 crc kubenswrapper[4751]: I0227 16:43:52.888540 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-55d77d7b5c-6nxxw"] Feb 27 16:43:52 crc kubenswrapper[4751]: I0227 16:43:52.921776 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-cf99c678f-cft6c"] Feb 27 16:43:52 crc kubenswrapper[4751]: I0227 16:43:52.922013 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-545456dc4-9vtcw"] Feb 27 16:43:52 crc kubenswrapper[4751]: I0227 16:43:52.949132 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7c789f89c6-8x8p7"] Feb 27 16:43:52 crc kubenswrapper[4751]: I0227 16:43:52.961839 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-67d996989d-wpjmf"] Feb 27 16:43:52 crc kubenswrapper[4751]: I0227 16:43:52.981925 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-64db6967f8-qgwmb"] Feb 27 16:43:52 crc kubenswrapper[4751]: I0227 16:43:52.984617 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b4054232-d1c6-469a-ab62-3bc130b5535b-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv\" (UID: \"b4054232-d1c6-469a-ab62-3bc130b5535b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" Feb 27 16:43:52 crc kubenswrapper[4751]: E0227 16:43:52.986742 4751 secret.go:188] Couldn't get secret 
openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 27 16:43:52 crc kubenswrapper[4751]: E0227 16:43:52.986833 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b4054232-d1c6-469a-ab62-3bc130b5535b-cert podName:b4054232-d1c6-469a-ab62-3bc130b5535b nodeName:}" failed. No retries permitted until 2026-02-27 16:43:54.986811878 +0000 UTC m=+1197.133826325 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b4054232-d1c6-469a-ab62-3bc130b5535b-cert") pod "openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" (UID: "b4054232-d1c6-469a-ab62-3bc130b5535b") : secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 27 16:43:52 crc kubenswrapper[4751]: I0227 16:43:52.995694 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-54688575f-gltv4"] Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.004952 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-78bc7f9bd9-vtnnn"] Feb 27 16:43:53 crc kubenswrapper[4751]: E0227 16:43:53.014697 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:f309cdea8084a4b1e8cbcd732d6e250fd93c55cfd1b48ba9026907c8591faab7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vfhg2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-9b9ff9f4d-4zzmq_openstack-operators(a0ef8d16-b0cf-4cb0-8e47-b1c10a3a13d7): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Feb 27 16:43:53 crc kubenswrapper[4751]: E0227 16:43:53.016496 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/swift-operator-controller-manager-9b9ff9f4d-4zzmq" podUID="a0ef8d16-b0cf-4cb0-8e47-b1c10a3a13d7" Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.016563 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-75684d597f-vzmw2"] Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.021290 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-74b6b5dc96-dqshb"] Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.026991 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-5d86c7ddb7-p4qmg"] Feb 27 16:43:53 crc kubenswrapper[4751]: E0227 16:43:53.028723 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:5592ec4a6fbe2c832d1828b51af0b907e5d733d478b6f378a9b2f6d6cf0ac505,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-czhls,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-7b6bfb6475-jlzhk_openstack-operators(26f8413e-34ed-4e45-8eec-e06ba73d1a8b): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Feb 27 16:43:53 crc kubenswrapper[4751]: E0227 16:43:53.031378 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/mariadb-operator-controller-manager-7b6bfb6475-jlzhk" podUID="26f8413e-34ed-4e45-8eec-e06ba73d1a8b" Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.036152 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-9b9ff9f4d-4zzmq"] Feb 27 16:43:53 crc kubenswrapper[4751]: E0227 16:43:53.036798 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:2d59045b8d8e6f9c5483c4fdda7c5057218d553200dc4bcf26789980ac1d9abd,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-d5h7f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-5d86c7ddb7-p4qmg_openstack-operators(22dbe1eb-ede5-439e-b447-c79f5051a22d): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Feb 27 16:43:53 crc kubenswrapper[4751]: E0227 16:43:53.038662 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/octavia-operator-controller-manager-5d86c7ddb7-p4qmg" podUID="22dbe1eb-ede5-439e-b447-c79f5051a22d" Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.045913 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-7b6bfb6475-jlzhk"] Feb 27 16:43:53 crc kubenswrapper[4751]: E0227 16:43:53.053233 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:1b9074a4ce16396d8bd2d30a475fc8c2f004f75a023e3eef8950661e89c0bcc6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-87gr8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-5fdb694969-w8rl5_openstack-operators(00579c6f-e25e-4b49-b43d-50547230a24d): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Feb 27 16:43:53 crc kubenswrapper[4751]: E0227 16:43:53.055067 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/telemetry-operator-controller-manager-5fdb694969-w8rl5" podUID="00579c6f-e25e-4b49-b43d-50547230a24d" Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.056605 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5fdb694969-w8rl5"] Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.224270 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-648564c9fc-qbhxg"] Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.239990 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-545456dc4-9vtcw" event={"ID":"d466fdb9-e7bd-4ea1-8e4d-0a260ba3f0a0","Type":"ContainerStarted","Data":"c555baee21618af0485215e8032ab5b30c4b52f7fea89cf17d859c0c02a64207"} Feb 27 16:43:53 crc kubenswrapper[4751]: W0227 16:43:53.240463 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4a8b3476_6579_4458_ac2b_ba9795eaa9eb.slice/crio-410e40628cb2756ecace804ee383199a5ac7ceaa1eb132b1f69f3d0cd038fa3d WatchSource:0}: Error finding container 410e40628cb2756ecace804ee383199a5ac7ceaa1eb132b1f69f3d0cd038fa3d: Status 404 returned error can't find the container with id 410e40628cb2756ecace804ee383199a5ac7ceaa1eb132b1f69f3d0cd038fa3d Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.242191 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-54688575f-gltv4" event={"ID":"789bdce1-90fe-43ec-89a2-4f0669899b1d","Type":"ContainerStarted","Data":"2c189b1e1310636f3178b934d9331e285a4289418ac030cac6abb29fb29a5906"} Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.243630 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-55d77d7b5c-6nxxw" event={"ID":"d31044d2-895d-4bb3-8af2-2cdb852fea06","Type":"ContainerStarted","Data":"9a7a5035e36e2bc84794052d34f70c00300b22c0e3a09b8d5dbc31d88e4dbce4"} Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.244683 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-78bc7f9bd9-vtnnn" 
event={"ID":"9126808a-112b-45e2-82fc-9f71b9ac3545","Type":"ContainerStarted","Data":"e31ad6f90f57e608effbd30e314324ca7945dd4fc4852cee91267366231e1130"} Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.245819 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-75684d597f-vzmw2" event={"ID":"6e95d387-339b-4ee7-b244-a1d82cb9f14e","Type":"ContainerStarted","Data":"c34b40ec66ad6cb107bff8b673a6c075adf6963016922f3e0c831b5fd4911c96"} Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.247324 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kv5gk"] Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.251947 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-6db6876945-d2xqb" event={"ID":"69ae63f1-4df4-46d1-89b1-4e0c4f60d83f","Type":"ContainerStarted","Data":"0f6abece24e95dd6ecaec1bc721f8a519a4af3767bb1557349b39be87314de23"} Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.257611 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-67d996989d-wpjmf" event={"ID":"4cdc0be8-19db-4cab-a32a-11848fab949d","Type":"ContainerStarted","Data":"7f328755bcf6706bc5e1ffb91fcb24fb519aac0af022429a9321bbff62936bbf"} Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.259982 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-5fdb694969-w8rl5" event={"ID":"00579c6f-e25e-4b49-b43d-50547230a24d","Type":"ContainerStarted","Data":"ebbd668f7268a808e2ee9a9f3ac719f1a026a0920ae9270a5276703afb464260"} Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.265967 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7c789f89c6-8x8p7" event={"ID":"eebbb996-efc8-4dd2-9840-da330af0ec75","Type":"ContainerStarted","Data":"2760058e05d71b741ed455a9abf6c18c2bf0f0ffc0619ccf2bb1286e676ab498"} Feb 27 16:43:53 crc kubenswrapper[4751]: E0227 16:43:53.266195 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:1b9074a4ce16396d8bd2d30a475fc8c2f004f75a023e3eef8950661e89c0bcc6\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-5fdb694969-w8rl5" podUID="00579c6f-e25e-4b49-b43d-50547230a24d" Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.267781 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-7b6bfb6475-jlzhk" event={"ID":"26f8413e-34ed-4e45-8eec-e06ba73d1a8b","Type":"ContainerStarted","Data":"9fc6a4ebb818c4d552edec7f2ef6d99e656f985547c5d3e7ca9e746329409a21"} Feb 27 16:43:53 crc kubenswrapper[4751]: E0227 16:43:53.270063 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:5592ec4a6fbe2c832d1828b51af0b907e5d733d478b6f378a9b2f6d6cf0ac505\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-7b6bfb6475-jlzhk" podUID="26f8413e-34ed-4e45-8eec-e06ba73d1a8b" Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.270887 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/octavia-operator-controller-manager-5d86c7ddb7-p4qmg" event={"ID":"22dbe1eb-ede5-439e-b447-c79f5051a22d","Type":"ContainerStarted","Data":"3100bb620aabb8c5dfc535b62bf9233b0573eb636dcc47bc10dd6a81fa0375ea"} Feb 27 16:43:53 crc kubenswrapper[4751]: E0227 16:43:53.271694 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:2d59045b8d8e6f9c5483c4fdda7c5057218d553200dc4bcf26789980ac1d9abd\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-5d86c7ddb7-p4qmg" podUID="22dbe1eb-ede5-439e-b447-c79f5051a22d" Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.272276 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-9b9ff9f4d-4zzmq" event={"ID":"a0ef8d16-b0cf-4cb0-8e47-b1c10a3a13d7","Type":"ContainerStarted","Data":"eaedbc616dbabb830b4aa6215da03092d021f7cc62430c677db13f68952e6b12"} Feb 27 16:43:53 crc kubenswrapper[4751]: E0227 16:43:53.272716 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:f309cdea8084a4b1e8cbcd732d6e250fd93c55cfd1b48ba9026907c8591faab7\\\"\"" pod="openstack-operators/swift-operator-controller-manager-9b9ff9f4d-4zzmq" podUID="a0ef8d16-b0cf-4cb0-8e47-b1c10a3a13d7" Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.273280 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-5d87c9d997-t6cx4" event={"ID":"105de0b5-2fbb-4c56-b286-6466e76e6db6","Type":"ContainerStarted","Data":"6acbb451a5b9e7fcdb23be4130aba390f76164996614259fa4df42971754b50f"} Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.274979 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-74b6b5dc96-dqshb" event={"ID":"eac3a3e7-7a56-4774-8a2f-2f6998e678c1","Type":"ContainerStarted","Data":"b209577a37cf2e6b9ba50957ee17353f365970529406d85b1d5013956e50c426"} Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.276416 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-bccc79885-6bqxl"] Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.279863 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-cf99c678f-cft6c" event={"ID":"648389a6-8f01-4a6e-916e-c3b567817015","Type":"ContainerStarted","Data":"8853a4bf440c1be9cbf455eed03ce9c8c3e4cb15c87f91bdbe05e4eb257ac61a"} Feb 27 16:43:53 crc kubenswrapper[4751]: E0227 16:43:53.287420 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:9d03f03aa9a460f1fcac8875064808c03e4ecd0388873bbfb9c7dc58331f3968,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m 
DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lzwmk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-55b5ff4dbb-tcndr_openstack-operators(91067468-8654-4bfd-b921-15679cf507c9): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.287541 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-64db6967f8-qgwmb" event={"ID":"094b19ab-3e9a-4a80-b5c6-177790fd63f0","Type":"ContainerStarted","Data":"baab4300bebfaca068eb76d0fdcb9a0f3594425ef20cc3277c684f95641eede6"} Feb 27 16:43:53 crc kubenswrapper[4751]: E0227 16:43:53.288591 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-55b5ff4dbb-tcndr" podUID="91067468-8654-4bfd-b921-15679cf507c9" Feb 27 16:43:53 crc kubenswrapper[4751]: E0227 16:43:53.290127 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 
0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8jcvx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-kv5gk_openstack-operators(d570bae3-0595-480f-bebc-80d86a0618d3): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Feb 27 16:43:53 crc kubenswrapper[4751]: E0227 16:43:53.291917 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kv5gk" podUID="d570bae3-0595-480f-bebc-80d86a0618d3" Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.294997 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-55b5ff4dbb-tcndr"] Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.299136 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-webhook-certs\") pod \"openstack-operator-controller-manager-789bbcd94f-grbwc\" (UID: \"0b8c1cf8-d3b0-4220-bbc5-81ccf3830782\") " pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:43:53 crc kubenswrapper[4751]: E0227 16:43:53.299713 4751 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Feb 27 16:43:53 crc kubenswrapper[4751]: E0227 16:43:53.299800 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-webhook-certs podName:0b8c1cf8-d3b0-4220-bbc5-81ccf3830782 nodeName:}" failed. No retries permitted until 2026-02-27 16:43:55.299774491 +0000 UTC m=+1197.446788988 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-webhook-certs") pod "openstack-operator-controller-manager-789bbcd94f-grbwc" (UID: "0b8c1cf8-d3b0-4220-bbc5-81ccf3830782") : secret "webhook-server-cert" not found Feb 27 16:43:53 crc kubenswrapper[4751]: I0227 16:43:53.299936 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-metrics-certs\") pod \"openstack-operator-controller-manager-789bbcd94f-grbwc\" (UID: \"0b8c1cf8-d3b0-4220-bbc5-81ccf3830782\") " pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:43:53 crc kubenswrapper[4751]: E0227 16:43:53.300083 4751 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Feb 27 16:43:53 crc kubenswrapper[4751]: E0227 16:43:53.300126 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-metrics-certs podName:0b8c1cf8-d3b0-4220-bbc5-81ccf3830782 nodeName:}" failed. No retries permitted until 2026-02-27 16:43:55.30011709 +0000 UTC m=+1197.447131527 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-metrics-certs") pod "openstack-operator-controller-manager-789bbcd94f-grbwc" (UID: "0b8c1cf8-d3b0-4220-bbc5-81ccf3830782") : secret "metrics-server-cert" not found Feb 27 16:43:53 crc kubenswrapper[4751]: W0227 16:43:53.312955 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5bba51b5_db12_42b8_80bd_c38ff3d7bfd4.slice/crio-8b9a5cdff7c777f8ae0a2fd891be92204ca0bea760b6cacfa340654652183b44 WatchSource:0}: Error finding container 8b9a5cdff7c777f8ae0a2fd891be92204ca0bea760b6cacfa340654652183b44: Status 404 returned error can't find the container with id 8b9a5cdff7c777f8ae0a2fd891be92204ca0bea760b6cacfa340654652183b44 Feb 27 16:43:54 crc kubenswrapper[4751]: I0227 16:43:54.295613 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kv5gk" event={"ID":"d570bae3-0595-480f-bebc-80d86a0618d3","Type":"ContainerStarted","Data":"d9ad59131ccb0021db7950d3a8430fa052b7ff134e4db9d4c1349037fb699ff6"} Feb 27 16:43:54 crc kubenswrapper[4751]: E0227 16:43:54.297688 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kv5gk" podUID="d570bae3-0595-480f-bebc-80d86a0618d3" Feb 27 16:43:54 crc kubenswrapper[4751]: I0227 16:43:54.298674 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-55b5ff4dbb-tcndr" event={"ID":"91067468-8654-4bfd-b921-15679cf507c9","Type":"ContainerStarted","Data":"acde08d647c21e79febbefacf9916449d7daed382e0f3f0ab4aaddef797a2a5d"} Feb 27 16:43:54 crc kubenswrapper[4751]: E0227 16:43:54.300770 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/test-operator@sha256:9d03f03aa9a460f1fcac8875064808c03e4ecd0388873bbfb9c7dc58331f3968\\\"\"" pod="openstack-operators/test-operator-controller-manager-55b5ff4dbb-tcndr" podUID="91067468-8654-4bfd-b921-15679cf507c9" Feb 27 16:43:54 crc kubenswrapper[4751]: I0227 16:43:54.301424 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-648564c9fc-qbhxg" event={"ID":"4a8b3476-6579-4458-ac2b-ba9795eaa9eb","Type":"ContainerStarted","Data":"410e40628cb2756ecace804ee383199a5ac7ceaa1eb132b1f69f3d0cd038fa3d"} Feb 27 16:43:54 crc kubenswrapper[4751]: I0227 16:43:54.302910 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-bccc79885-6bqxl" event={"ID":"5bba51b5-db12-42b8-80bd-c38ff3d7bfd4","Type":"ContainerStarted","Data":"8b9a5cdff7c777f8ae0a2fd891be92204ca0bea760b6cacfa340654652183b44"} Feb 27 16:43:54 crc kubenswrapper[4751]: E0227 16:43:54.303809 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:5592ec4a6fbe2c832d1828b51af0b907e5d733d478b6f378a9b2f6d6cf0ac505\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-7b6bfb6475-jlzhk" podUID="26f8413e-34ed-4e45-8eec-e06ba73d1a8b" Feb 27 16:43:54 crc kubenswrapper[4751]: E0227 16:43:54.303949 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:1b9074a4ce16396d8bd2d30a475fc8c2f004f75a023e3eef8950661e89c0bcc6\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-5fdb694969-w8rl5" podUID="00579c6f-e25e-4b49-b43d-50547230a24d" Feb 27 16:43:54 crc kubenswrapper[4751]: E0227 16:43:54.303951 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:f309cdea8084a4b1e8cbcd732d6e250fd93c55cfd1b48ba9026907c8591faab7\\\"\"" pod="openstack-operators/swift-operator-controller-manager-9b9ff9f4d-4zzmq" podUID="a0ef8d16-b0cf-4cb0-8e47-b1c10a3a13d7" Feb 27 16:43:54 crc kubenswrapper[4751]: E0227 16:43:54.305574 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:2d59045b8d8e6f9c5483c4fdda7c5057218d553200dc4bcf26789980ac1d9abd\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-5d86c7ddb7-p4qmg" podUID="22dbe1eb-ede5-439e-b447-c79f5051a22d" Feb 27 16:43:54 crc kubenswrapper[4751]: I0227 16:43:54.839954 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/367c4281-7780-478a-ae73-263cf73aa15e-cert\") pod \"infra-operator-controller-manager-f7fcc58b9-74jk2\" (UID: \"367c4281-7780-478a-ae73-263cf73aa15e\") " pod="openstack-operators/infra-operator-controller-manager-f7fcc58b9-74jk2" Feb 27 16:43:54 crc kubenswrapper[4751]: E0227 16:43:54.840210 4751 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Feb 27 16:43:54 crc kubenswrapper[4751]: E0227 16:43:54.840340 4751 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/367c4281-7780-478a-ae73-263cf73aa15e-cert podName:367c4281-7780-478a-ae73-263cf73aa15e nodeName:}" failed. No retries permitted until 2026-02-27 16:43:58.84031247 +0000 UTC m=+1200.987326917 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/367c4281-7780-478a-ae73-263cf73aa15e-cert") pod "infra-operator-controller-manager-f7fcc58b9-74jk2" (UID: "367c4281-7780-478a-ae73-263cf73aa15e") : secret "infra-operator-webhook-server-cert" not found Feb 27 16:43:55 crc kubenswrapper[4751]: I0227 16:43:55.044137 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b4054232-d1c6-469a-ab62-3bc130b5535b-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv\" (UID: \"b4054232-d1c6-469a-ab62-3bc130b5535b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" Feb 27 16:43:55 crc kubenswrapper[4751]: E0227 16:43:55.044299 4751 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 27 16:43:55 crc kubenswrapper[4751]: E0227 16:43:55.044354 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b4054232-d1c6-469a-ab62-3bc130b5535b-cert podName:b4054232-d1c6-469a-ab62-3bc130b5535b nodeName:}" failed. No retries permitted until 2026-02-27 16:43:59.044340189 +0000 UTC m=+1201.191354636 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b4054232-d1c6-469a-ab62-3bc130b5535b-cert") pod "openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" (UID: "b4054232-d1c6-469a-ab62-3bc130b5535b") : secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 27 16:43:55 crc kubenswrapper[4751]: E0227 16:43:55.312228 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kv5gk" podUID="d570bae3-0595-480f-bebc-80d86a0618d3" Feb 27 16:43:55 crc kubenswrapper[4751]: E0227 16:43:55.313121 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:9d03f03aa9a460f1fcac8875064808c03e4ecd0388873bbfb9c7dc58331f3968\\\"\"" pod="openstack-operators/test-operator-controller-manager-55b5ff4dbb-tcndr" podUID="91067468-8654-4bfd-b921-15679cf507c9" Feb 27 16:43:55 crc kubenswrapper[4751]: I0227 16:43:55.347692 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-webhook-certs\") pod \"openstack-operator-controller-manager-789bbcd94f-grbwc\" (UID: \"0b8c1cf8-d3b0-4220-bbc5-81ccf3830782\") " pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:43:55 crc kubenswrapper[4751]: I0227 16:43:55.347802 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-metrics-certs\") pod \"openstack-operator-controller-manager-789bbcd94f-grbwc\" (UID: \"0b8c1cf8-d3b0-4220-bbc5-81ccf3830782\") " pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:43:55 crc kubenswrapper[4751]: E0227 16:43:55.347896 4751 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Feb 27 16:43:55 crc kubenswrapper[4751]: E0227 16:43:55.347975 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-webhook-certs podName:0b8c1cf8-d3b0-4220-bbc5-81ccf3830782 nodeName:}" failed. No retries permitted until 2026-02-27 16:43:59.347954623 +0000 UTC m=+1201.494969070 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-webhook-certs") pod "openstack-operator-controller-manager-789bbcd94f-grbwc" (UID: "0b8c1cf8-d3b0-4220-bbc5-81ccf3830782") : secret "webhook-server-cert" not found Feb 27 16:43:55 crc kubenswrapper[4751]: E0227 16:43:55.348036 4751 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Feb 27 16:43:55 crc kubenswrapper[4751]: E0227 16:43:55.348141 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-metrics-certs podName:0b8c1cf8-d3b0-4220-bbc5-81ccf3830782 nodeName:}" failed. No retries permitted until 2026-02-27 16:43:59.348115357 +0000 UTC m=+1201.495129804 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-metrics-certs") pod "openstack-operator-controller-manager-789bbcd94f-grbwc" (UID: "0b8c1cf8-d3b0-4220-bbc5-81ccf3830782") : secret "metrics-server-cert" not found Feb 27 16:43:58 crc kubenswrapper[4751]: I0227 16:43:58.904627 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/367c4281-7780-478a-ae73-263cf73aa15e-cert\") pod \"infra-operator-controller-manager-f7fcc58b9-74jk2\" (UID: \"367c4281-7780-478a-ae73-263cf73aa15e\") " pod="openstack-operators/infra-operator-controller-manager-f7fcc58b9-74jk2" Feb 27 16:43:58 crc kubenswrapper[4751]: E0227 16:43:58.904941 4751 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Feb 27 16:43:58 crc kubenswrapper[4751]: E0227 16:43:58.905626 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/367c4281-7780-478a-ae73-263cf73aa15e-cert podName:367c4281-7780-478a-ae73-263cf73aa15e nodeName:}" failed. No retries permitted until 2026-02-27 16:44:06.905608084 +0000 UTC m=+1209.052622531 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/367c4281-7780-478a-ae73-263cf73aa15e-cert") pod "infra-operator-controller-manager-f7fcc58b9-74jk2" (UID: "367c4281-7780-478a-ae73-263cf73aa15e") : secret "infra-operator-webhook-server-cert" not found Feb 27 16:43:59 crc kubenswrapper[4751]: I0227 16:43:59.107802 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b4054232-d1c6-469a-ab62-3bc130b5535b-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv\" (UID: \"b4054232-d1c6-469a-ab62-3bc130b5535b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" Feb 27 16:43:59 crc kubenswrapper[4751]: E0227 16:43:59.108002 4751 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 27 16:43:59 crc kubenswrapper[4751]: E0227 16:43:59.108066 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b4054232-d1c6-469a-ab62-3bc130b5535b-cert podName:b4054232-d1c6-469a-ab62-3bc130b5535b nodeName:}" failed. No retries permitted until 2026-02-27 16:44:07.108048681 +0000 UTC m=+1209.255063128 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b4054232-d1c6-469a-ab62-3bc130b5535b-cert") pod "openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" (UID: "b4054232-d1c6-469a-ab62-3bc130b5535b") : secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 27 16:43:59 crc kubenswrapper[4751]: I0227 16:43:59.412145 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-webhook-certs\") pod \"openstack-operator-controller-manager-789bbcd94f-grbwc\" (UID: \"0b8c1cf8-d3b0-4220-bbc5-81ccf3830782\") " pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:43:59 crc kubenswrapper[4751]: I0227 16:43:59.412230 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-metrics-certs\") pod \"openstack-operator-controller-manager-789bbcd94f-grbwc\" (UID: \"0b8c1cf8-d3b0-4220-bbc5-81ccf3830782\") " pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:43:59 crc kubenswrapper[4751]: E0227 16:43:59.412353 4751 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Feb 27 16:43:59 crc kubenswrapper[4751]: E0227 16:43:59.412382 4751 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Feb 27 16:43:59 crc kubenswrapper[4751]: E0227 16:43:59.412458 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-webhook-certs podName:0b8c1cf8-d3b0-4220-bbc5-81ccf3830782 nodeName:}" failed. No retries permitted until 2026-02-27 16:44:07.412441946 +0000 UTC m=+1209.559456383 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-webhook-certs") pod "openstack-operator-controller-manager-789bbcd94f-grbwc" (UID: "0b8c1cf8-d3b0-4220-bbc5-81ccf3830782") : secret "webhook-server-cert" not found Feb 27 16:43:59 crc kubenswrapper[4751]: E0227 16:43:59.412477 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-metrics-certs podName:0b8c1cf8-d3b0-4220-bbc5-81ccf3830782 nodeName:}" failed. No retries permitted until 2026-02-27 16:44:07.412470367 +0000 UTC m=+1209.559484814 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-metrics-certs") pod "openstack-operator-controller-manager-789bbcd94f-grbwc" (UID: "0b8c1cf8-d3b0-4220-bbc5-81ccf3830782") : secret "metrics-server-cert" not found Feb 27 16:44:00 crc kubenswrapper[4751]: I0227 16:44:00.137984 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536844-qtl4v"] Feb 27 16:44:00 crc kubenswrapper[4751]: I0227 16:44:00.139408 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536844-qtl4v" Feb 27 16:44:00 crc kubenswrapper[4751]: I0227 16:44:00.146096 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536844-qtl4v"] Feb 27 16:44:00 crc kubenswrapper[4751]: I0227 16:44:00.150054 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 16:44:00 crc kubenswrapper[4751]: I0227 16:44:00.152021 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 16:44:00 crc kubenswrapper[4751]: I0227 16:44:00.155285 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 16:44:00 crc kubenswrapper[4751]: I0227 16:44:00.327201 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvzjv\" (UniqueName: \"kubernetes.io/projected/78c6e077-9027-4c5f-a858-6b0b3328682a-kube-api-access-bvzjv\") pod \"auto-csr-approver-29536844-qtl4v\" (UID: \"78c6e077-9027-4c5f-a858-6b0b3328682a\") " pod="openshift-infra/auto-csr-approver-29536844-qtl4v" Feb 27 16:44:00 crc kubenswrapper[4751]: I0227 16:44:00.429638 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvzjv\" (UniqueName: \"kubernetes.io/projected/78c6e077-9027-4c5f-a858-6b0b3328682a-kube-api-access-bvzjv\") pod \"auto-csr-approver-29536844-qtl4v\" (UID: \"78c6e077-9027-4c5f-a858-6b0b3328682a\") " pod="openshift-infra/auto-csr-approver-29536844-qtl4v" Feb 27 16:44:00 crc kubenswrapper[4751]: I0227 16:44:00.452122 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvzjv\" (UniqueName: \"kubernetes.io/projected/78c6e077-9027-4c5f-a858-6b0b3328682a-kube-api-access-bvzjv\") pod \"auto-csr-approver-29536844-qtl4v\" (UID: \"78c6e077-9027-4c5f-a858-6b0b3328682a\") " pod="openshift-infra/auto-csr-approver-29536844-qtl4v" Feb 27 16:44:00 crc kubenswrapper[4751]: I0227 16:44:00.470990 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536844-qtl4v" Feb 27 16:44:06 crc kubenswrapper[4751]: I0227 16:44:06.938632 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/367c4281-7780-478a-ae73-263cf73aa15e-cert\") pod \"infra-operator-controller-manager-f7fcc58b9-74jk2\" (UID: \"367c4281-7780-478a-ae73-263cf73aa15e\") " pod="openstack-operators/infra-operator-controller-manager-f7fcc58b9-74jk2" Feb 27 16:44:06 crc kubenswrapper[4751]: E0227 16:44:06.938783 4751 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Feb 27 16:44:06 crc kubenswrapper[4751]: E0227 16:44:06.939115 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/367c4281-7780-478a-ae73-263cf73aa15e-cert podName:367c4281-7780-478a-ae73-263cf73aa15e nodeName:}" failed. No retries permitted until 2026-02-27 16:44:22.939102555 +0000 UTC m=+1225.086116992 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/367c4281-7780-478a-ae73-263cf73aa15e-cert") pod "infra-operator-controller-manager-f7fcc58b9-74jk2" (UID: "367c4281-7780-478a-ae73-263cf73aa15e") : secret "infra-operator-webhook-server-cert" not found Feb 27 16:44:07 crc kubenswrapper[4751]: I0227 16:44:07.140840 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b4054232-d1c6-469a-ab62-3bc130b5535b-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv\" (UID: \"b4054232-d1c6-469a-ab62-3bc130b5535b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" Feb 27 16:44:07 crc kubenswrapper[4751]: E0227 16:44:07.141011 4751 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 27 16:44:07 crc kubenswrapper[4751]: E0227 16:44:07.141079 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b4054232-d1c6-469a-ab62-3bc130b5535b-cert podName:b4054232-d1c6-469a-ab62-3bc130b5535b nodeName:}" failed. No retries permitted until 2026-02-27 16:44:23.141060128 +0000 UTC m=+1225.288074575 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b4054232-d1c6-469a-ab62-3bc130b5535b-cert") pod "openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" (UID: "b4054232-d1c6-469a-ab62-3bc130b5535b") : secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 27 16:44:07 crc kubenswrapper[4751]: I0227 16:44:07.445038 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-webhook-certs\") pod \"openstack-operator-controller-manager-789bbcd94f-grbwc\" (UID: \"0b8c1cf8-d3b0-4220-bbc5-81ccf3830782\") " pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:44:07 crc kubenswrapper[4751]: I0227 16:44:07.445116 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-metrics-certs\") pod \"openstack-operator-controller-manager-789bbcd94f-grbwc\" (UID: \"0b8c1cf8-d3b0-4220-bbc5-81ccf3830782\") " pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:44:07 crc kubenswrapper[4751]: E0227 16:44:07.445226 4751 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Feb 27 16:44:07 crc kubenswrapper[4751]: E0227 16:44:07.445258 4751 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Feb 27 16:44:07 crc kubenswrapper[4751]: E0227 16:44:07.445281 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-metrics-certs podName:0b8c1cf8-d3b0-4220-bbc5-81ccf3830782 nodeName:}" failed. No retries permitted until 2026-02-27 16:44:23.445265608 +0000 UTC m=+1225.592280055 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-metrics-certs") pod "openstack-operator-controller-manager-789bbcd94f-grbwc" (UID: "0b8c1cf8-d3b0-4220-bbc5-81ccf3830782") : secret "metrics-server-cert" not found Feb 27 16:44:07 crc kubenswrapper[4751]: E0227 16:44:07.445424 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-webhook-certs podName:0b8c1cf8-d3b0-4220-bbc5-81ccf3830782 nodeName:}" failed. No retries permitted until 2026-02-27 16:44:23.445374441 +0000 UTC m=+1225.592388888 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-webhook-certs") pod "openstack-operator-controller-manager-789bbcd94f-grbwc" (UID: "0b8c1cf8-d3b0-4220-bbc5-81ccf3830782") : secret "webhook-server-cert" not found Feb 27 16:44:10 crc kubenswrapper[4751]: E0227 16:44:10.717486 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:172f24bd4603ac3498536a8a2c8fffb07cf9113dd52bc132778ea0aa275c6b84" Feb 27 16:44:10 crc kubenswrapper[4751]: E0227 16:44:10.718266 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:172f24bd4603ac3498536a8a2c8fffb07cf9113dd52bc132778ea0aa275c6b84,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2l9l2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-74b6b5dc96-dqshb_openstack-operators(eac3a3e7-7a56-4774-8a2f-2f6998e678c1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 27 16:44:10 crc kubenswrapper[4751]: E0227 16:44:10.719503 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-74b6b5dc96-dqshb" 
podUID="eac3a3e7-7a56-4774-8a2f-2f6998e678c1" Feb 27 16:44:11 crc kubenswrapper[4751]: I0227 16:44:11.057072 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536844-qtl4v"] Feb 27 16:44:11 crc kubenswrapper[4751]: E0227 16:44:11.461471 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:172f24bd4603ac3498536a8a2c8fffb07cf9113dd52bc132778ea0aa275c6b84\\\"\"" pod="openstack-operators/nova-operator-controller-manager-74b6b5dc96-dqshb" podUID="eac3a3e7-7a56-4774-8a2f-2f6998e678c1" Feb 27 16:44:12 crc kubenswrapper[4751]: W0227 16:44:12.732218 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod78c6e077_9027_4c5f_a858_6b0b3328682a.slice/crio-da4d2b7af5605f0784ab39bb0d5494c8301a03e8bd2cf7a3794917186ca0a187 WatchSource:0}: Error finding container da4d2b7af5605f0784ab39bb0d5494c8301a03e8bd2cf7a3794917186ca0a187: Status 404 returned error can't find the container with id da4d2b7af5605f0784ab39bb0d5494c8301a03e8bd2cf7a3794917186ca0a187 Feb 27 16:44:13 crc kubenswrapper[4751]: I0227 16:44:13.479361 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536844-qtl4v" event={"ID":"78c6e077-9027-4c5f-a858-6b0b3328682a","Type":"ContainerStarted","Data":"da4d2b7af5605f0784ab39bb0d5494c8301a03e8bd2cf7a3794917186ca0a187"} Feb 27 16:44:15 crc kubenswrapper[4751]: I0227 16:44:15.495782 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-6db6876945-d2xqb" event={"ID":"69ae63f1-4df4-46d1-89b1-4e0c4f60d83f","Type":"ContainerStarted","Data":"d855cca56c92f98d88320dc4bbd601fd87c44282dd0967917fb2dfc5a5be1d25"} Feb 27 16:44:15 crc kubenswrapper[4751]: I0227 16:44:15.496823 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-6db6876945-d2xqb" Feb 27 16:44:15 crc kubenswrapper[4751]: I0227 16:44:15.521363 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-6db6876945-d2xqb" podStartSLOduration=10.030890103 podStartE2EDuration="25.521343113s" podCreationTimestamp="2026-02-27 16:43:50 +0000 UTC" firstStartedPulling="2026-02-27 16:43:52.438165653 +0000 UTC m=+1194.585180100" lastFinishedPulling="2026-02-27 16:44:07.928618673 +0000 UTC m=+1210.075633110" observedRunningTime="2026-02-27 16:44:15.515453016 +0000 UTC m=+1217.662467473" watchObservedRunningTime="2026-02-27 16:44:15.521343113 +0000 UTC m=+1217.668357570" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.513793 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-55d77d7b5c-6nxxw" event={"ID":"d31044d2-895d-4bb3-8af2-2cdb852fea06","Type":"ContainerStarted","Data":"326a271a042ce53d065ab3db7b7df3908bb173e321a084eb636fe76b38f063ed"} Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.514433 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-55d77d7b5c-6nxxw" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.516764 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7c789f89c6-8x8p7" 
event={"ID":"eebbb996-efc8-4dd2-9840-da330af0ec75","Type":"ContainerStarted","Data":"774ec7571bb5bf96504f3dad03d20dddca2d76fb80cea6d029debf628d943ed4"} Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.517487 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7c789f89c6-8x8p7" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.535310 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-7b6bfb6475-jlzhk" event={"ID":"26f8413e-34ed-4e45-8eec-e06ba73d1a8b","Type":"ContainerStarted","Data":"0f58db76106269a2810a0e35db27c39f3bf5af9dfbad0297c6e64be9045f3e55"} Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.535586 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-7b6bfb6475-jlzhk" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.545507 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-64db6967f8-qgwmb" event={"ID":"094b19ab-3e9a-4a80-b5c6-177790fd63f0","Type":"ContainerStarted","Data":"f55e1c2e5113d21c1a25e4925ae83f32987222db34b1b6af2885ae3121fac96f"} Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.546196 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-64db6967f8-qgwmb" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.558774 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-bccc79885-6bqxl" event={"ID":"5bba51b5-db12-42b8-80bd-c38ff3d7bfd4","Type":"ContainerStarted","Data":"c4244b97b6c3a5d2aa11418f6339dfc043abf1592cfc80e471c9f021b9400329"} Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.559690 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-bccc79885-6bqxl" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.567023 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-545456dc4-9vtcw" event={"ID":"d466fdb9-e7bd-4ea1-8e4d-0a260ba3f0a0","Type":"ContainerStarted","Data":"e8472542f8e06cbb9f62e345c859bb402eb419b4447761925dcfff63f941a7a6"} Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.567893 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-545456dc4-9vtcw" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.572725 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-5fdb694969-w8rl5" event={"ID":"00579c6f-e25e-4b49-b43d-50547230a24d","Type":"ContainerStarted","Data":"2ff70bf8d5ea5e35fd592c4f8884b03a5f7c65e9885df9fdcf7ed5041a6020b5"} Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.573299 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-5fdb694969-w8rl5" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.578097 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-67d996989d-wpjmf" event={"ID":"4cdc0be8-19db-4cab-a32a-11848fab949d","Type":"ContainerStarted","Data":"7d3f70188182f3b04a12a4a69d15bdc9c70bedafca49da06039191783737548e"} Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 
16:44:16.578695 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-67d996989d-wpjmf" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.586009 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-cf99c678f-cft6c" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.587809 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-75684d597f-vzmw2" event={"ID":"6e95d387-339b-4ee7-b244-a1d82cb9f14e","Type":"ContainerStarted","Data":"cb2eb1f787cbde7284a446eba0381d8899962bec9c2926110dc3ee34016643dc"} Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.588084 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-75684d597f-vzmw2" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.590836 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-55d77d7b5c-6nxxw" podStartSLOduration=7.86526781 podStartE2EDuration="26.590813024s" podCreationTimestamp="2026-02-27 16:43:50 +0000 UTC" firstStartedPulling="2026-02-27 16:43:52.915182558 +0000 UTC m=+1195.062197005" lastFinishedPulling="2026-02-27 16:44:11.640727782 +0000 UTC m=+1213.787742219" observedRunningTime="2026-02-27 16:44:16.579942474 +0000 UTC m=+1218.726956921" watchObservedRunningTime="2026-02-27 16:44:16.590813024 +0000 UTC m=+1218.737827471" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.597533 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-648564c9fc-qbhxg" event={"ID":"4a8b3476-6579-4458-ac2b-ba9795eaa9eb","Type":"ContainerStarted","Data":"6e9d2d65f1ed13e13748e0f0c0852dddd17e55f4fcaf29e6eaf5f9523bf18bb8"} Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.597731 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-648564c9fc-qbhxg" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.639238 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-54688575f-gltv4" event={"ID":"789bdce1-90fe-43ec-89a2-4f0669899b1d","Type":"ContainerStarted","Data":"c44bc6be627efb1170dffa2f677bf78a35abc795c17e3e489206932dd8bc209f"} Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.640325 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-54688575f-gltv4" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.643165 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-67d996989d-wpjmf" podStartSLOduration=10.191194316 podStartE2EDuration="26.643143899s" podCreationTimestamp="2026-02-27 16:43:50 +0000 UTC" firstStartedPulling="2026-02-27 16:43:52.943179025 +0000 UTC m=+1195.090193472" lastFinishedPulling="2026-02-27 16:44:09.395128608 +0000 UTC m=+1211.542143055" observedRunningTime="2026-02-27 16:44:16.638908486 +0000 UTC m=+1218.785922933" watchObservedRunningTime="2026-02-27 16:44:16.643143899 +0000 UTC m=+1218.790158346" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.663736 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/horizon-operator-controller-manager-78bc7f9bd9-vtnnn" event={"ID":"9126808a-112b-45e2-82fc-9f71b9ac3545","Type":"ContainerStarted","Data":"357c5054791beb6d2eb99927e89fb999ec288e9b65a48163befd518f982afa17"} Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.664462 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-78bc7f9bd9-vtnnn" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.665987 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-5d86c7ddb7-p4qmg" event={"ID":"22dbe1eb-ede5-439e-b447-c79f5051a22d","Type":"ContainerStarted","Data":"055de7ef0cd851022b2881b21a9fa4b416439781679780bcc32b3dbac279e37e"} Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.666351 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-5d86c7ddb7-p4qmg" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.697453 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-9b9ff9f4d-4zzmq" event={"ID":"a0ef8d16-b0cf-4cb0-8e47-b1c10a3a13d7","Type":"ContainerStarted","Data":"506d4749d9a48ffe30c17cc82b80de4a6a6a82a0604578af229013ed6f53ef53"} Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.698111 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-9b9ff9f4d-4zzmq" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.711640 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-5d87c9d997-t6cx4" event={"ID":"105de0b5-2fbb-4c56-b286-6466e76e6db6","Type":"ContainerStarted","Data":"a87820847596983c4be05b7437e3d1155d2421e92a3514ae35a596f020cf4f33"} Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.712595 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-5d87c9d997-t6cx4" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.719220 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-7b6bfb6475-jlzhk" podStartSLOduration=4.226548808 podStartE2EDuration="26.719200377s" podCreationTimestamp="2026-02-27 16:43:50 +0000 UTC" firstStartedPulling="2026-02-27 16:43:53.028592382 +0000 UTC m=+1195.175606829" lastFinishedPulling="2026-02-27 16:44:15.521243951 +0000 UTC m=+1217.668258398" observedRunningTime="2026-02-27 16:44:16.71144891 +0000 UTC m=+1218.858463347" watchObservedRunningTime="2026-02-27 16:44:16.719200377 +0000 UTC m=+1218.866214824" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.761327 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-bccc79885-6bqxl" podStartSLOduration=8.411444999 podStartE2EDuration="25.761306089s" podCreationTimestamp="2026-02-27 16:43:51 +0000 UTC" firstStartedPulling="2026-02-27 16:43:53.316065825 +0000 UTC m=+1195.463080282" lastFinishedPulling="2026-02-27 16:44:10.665926925 +0000 UTC m=+1212.812941372" observedRunningTime="2026-02-27 16:44:16.759770958 +0000 UTC m=+1218.906785405" watchObservedRunningTime="2026-02-27 16:44:16.761306089 +0000 UTC m=+1218.908320536" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.830767 4751 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openstack-operators/glance-operator-controller-manager-64db6967f8-qgwmb" podStartSLOduration=5.013006114 podStartE2EDuration="26.83073435s" podCreationTimestamp="2026-02-27 16:43:50 +0000 UTC" firstStartedPulling="2026-02-27 16:43:52.974696845 +0000 UTC m=+1195.121711292" lastFinishedPulling="2026-02-27 16:44:14.792425081 +0000 UTC m=+1216.939439528" observedRunningTime="2026-02-27 16:44:16.807262584 +0000 UTC m=+1218.954277031" watchObservedRunningTime="2026-02-27 16:44:16.83073435 +0000 UTC m=+1218.977748797" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.862352 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7c789f89c6-8x8p7" podStartSLOduration=5.013692922 podStartE2EDuration="26.862320892s" podCreationTimestamp="2026-02-27 16:43:50 +0000 UTC" firstStartedPulling="2026-02-27 16:43:52.943168255 +0000 UTC m=+1195.090182692" lastFinishedPulling="2026-02-27 16:44:14.791796215 +0000 UTC m=+1216.938810662" observedRunningTime="2026-02-27 16:44:16.858578182 +0000 UTC m=+1219.005592629" watchObservedRunningTime="2026-02-27 16:44:16.862320892 +0000 UTC m=+1219.009335329" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.917602 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-5fdb694969-w8rl5" podStartSLOduration=4.176863652 podStartE2EDuration="25.917575805s" podCreationTimestamp="2026-02-27 16:43:51 +0000 UTC" firstStartedPulling="2026-02-27 16:43:53.053125966 +0000 UTC m=+1195.200140413" lastFinishedPulling="2026-02-27 16:44:14.793838119 +0000 UTC m=+1216.940852566" observedRunningTime="2026-02-27 16:44:16.917458982 +0000 UTC m=+1219.064473429" watchObservedRunningTime="2026-02-27 16:44:16.917575805 +0000 UTC m=+1219.064590252" Feb 27 16:44:16 crc kubenswrapper[4751]: I0227 16:44:16.972458 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-545456dc4-9vtcw" podStartSLOduration=9.842697584 podStartE2EDuration="26.972433177s" podCreationTimestamp="2026-02-27 16:43:50 +0000 UTC" firstStartedPulling="2026-02-27 16:43:52.943558995 +0000 UTC m=+1195.090573442" lastFinishedPulling="2026-02-27 16:44:10.073294588 +0000 UTC m=+1212.220309035" observedRunningTime="2026-02-27 16:44:16.970265119 +0000 UTC m=+1219.117279566" watchObservedRunningTime="2026-02-27 16:44:16.972433177 +0000 UTC m=+1219.119447624" Feb 27 16:44:17 crc kubenswrapper[4751]: I0227 16:44:17.031552 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-75684d597f-vzmw2" podStartSLOduration=10.427323418 podStartE2EDuration="26.031523292s" podCreationTimestamp="2026-02-27 16:43:51 +0000 UTC" firstStartedPulling="2026-02-27 16:43:52.993755203 +0000 UTC m=+1195.140769650" lastFinishedPulling="2026-02-27 16:44:08.597955077 +0000 UTC m=+1210.744969524" observedRunningTime="2026-02-27 16:44:17.010133352 +0000 UTC m=+1219.157147799" watchObservedRunningTime="2026-02-27 16:44:17.031523292 +0000 UTC m=+1219.178537739" Feb 27 16:44:17 crc kubenswrapper[4751]: I0227 16:44:17.113520 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-5d86c7ddb7-p4qmg" podStartSLOduration=3.634457562 podStartE2EDuration="26.113498928s" podCreationTimestamp="2026-02-27 16:43:51 +0000 UTC" firstStartedPulling="2026-02-27 
16:43:53.036668437 +0000 UTC m=+1195.183682884" lastFinishedPulling="2026-02-27 16:44:15.515709803 +0000 UTC m=+1217.662724250" observedRunningTime="2026-02-27 16:44:17.060609428 +0000 UTC m=+1219.207623875" watchObservedRunningTime="2026-02-27 16:44:17.113498928 +0000 UTC m=+1219.260513375" Feb 27 16:44:17 crc kubenswrapper[4751]: I0227 16:44:17.117711 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-cf99c678f-cft6c" podStartSLOduration=9.392000181 podStartE2EDuration="27.11769804s" podCreationTimestamp="2026-02-27 16:43:50 +0000 UTC" firstStartedPulling="2026-02-27 16:43:52.939875767 +0000 UTC m=+1195.086890214" lastFinishedPulling="2026-02-27 16:44:10.665573626 +0000 UTC m=+1212.812588073" observedRunningTime="2026-02-27 16:44:17.101241001 +0000 UTC m=+1219.248255448" watchObservedRunningTime="2026-02-27 16:44:17.11769804 +0000 UTC m=+1219.264712487" Feb 27 16:44:17 crc kubenswrapper[4751]: I0227 16:44:17.194119 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-54688575f-gltv4" podStartSLOduration=10.065004051 podStartE2EDuration="27.194100036s" podCreationTimestamp="2026-02-27 16:43:50 +0000 UTC" firstStartedPulling="2026-02-27 16:43:52.991154994 +0000 UTC m=+1195.138169441" lastFinishedPulling="2026-02-27 16:44:10.120250979 +0000 UTC m=+1212.267265426" observedRunningTime="2026-02-27 16:44:17.178258814 +0000 UTC m=+1219.325273261" watchObservedRunningTime="2026-02-27 16:44:17.194100036 +0000 UTC m=+1219.341114483" Feb 27 16:44:17 crc kubenswrapper[4751]: I0227 16:44:17.282964 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-infra/auto-csr-approver-29536844-qtl4v" podStartSLOduration=14.432397964 podStartE2EDuration="17.282935455s" podCreationTimestamp="2026-02-27 16:44:00 +0000 UTC" firstStartedPulling="2026-02-27 16:44:12.739434412 +0000 UTC m=+1214.886448859" lastFinishedPulling="2026-02-27 16:44:15.589971903 +0000 UTC m=+1217.736986350" observedRunningTime="2026-02-27 16:44:17.242835956 +0000 UTC m=+1219.389850403" watchObservedRunningTime="2026-02-27 16:44:17.282935455 +0000 UTC m=+1219.429949902" Feb 27 16:44:17 crc kubenswrapper[4751]: I0227 16:44:17.285189 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-9b9ff9f4d-4zzmq" podStartSLOduration=3.775569754 podStartE2EDuration="26.285181955s" podCreationTimestamp="2026-02-27 16:43:51 +0000 UTC" firstStartedPulling="2026-02-27 16:43:53.014515927 +0000 UTC m=+1195.161530374" lastFinishedPulling="2026-02-27 16:44:15.524128128 +0000 UTC m=+1217.671142575" observedRunningTime="2026-02-27 16:44:17.281152687 +0000 UTC m=+1219.428167134" watchObservedRunningTime="2026-02-27 16:44:17.285181955 +0000 UTC m=+1219.432196402" Feb 27 16:44:17 crc kubenswrapper[4751]: I0227 16:44:17.350749 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-78bc7f9bd9-vtnnn" podStartSLOduration=9.67595271 podStartE2EDuration="27.350731112s" podCreationTimestamp="2026-02-27 16:43:50 +0000 UTC" firstStartedPulling="2026-02-27 16:43:52.99101276 +0000 UTC m=+1195.138027207" lastFinishedPulling="2026-02-27 16:44:10.665791162 +0000 UTC m=+1212.812805609" observedRunningTime="2026-02-27 16:44:17.321091372 +0000 UTC m=+1219.468105829" watchObservedRunningTime="2026-02-27 16:44:17.350731112 +0000 UTC 
m=+1219.497745559" Feb 27 16:44:17 crc kubenswrapper[4751]: I0227 16:44:17.379864 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-5d87c9d997-t6cx4" podStartSLOduration=13.231564067 podStartE2EDuration="27.379842068s" podCreationTimestamp="2026-02-27 16:43:50 +0000 UTC" firstStartedPulling="2026-02-27 16:43:52.36379721 +0000 UTC m=+1194.510811657" lastFinishedPulling="2026-02-27 16:44:06.512075211 +0000 UTC m=+1208.659089658" observedRunningTime="2026-02-27 16:44:17.35629881 +0000 UTC m=+1219.503313257" watchObservedRunningTime="2026-02-27 16:44:17.379842068 +0000 UTC m=+1219.526856505" Feb 27 16:44:17 crc kubenswrapper[4751]: I0227 16:44:17.380814 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-648564c9fc-qbhxg" podStartSLOduration=7.992445059 podStartE2EDuration="26.380807624s" podCreationTimestamp="2026-02-27 16:43:51 +0000 UTC" firstStartedPulling="2026-02-27 16:43:53.252294855 +0000 UTC m=+1195.399309302" lastFinishedPulling="2026-02-27 16:44:11.64065742 +0000 UTC m=+1213.787671867" observedRunningTime="2026-02-27 16:44:17.380010153 +0000 UTC m=+1219.527024600" watchObservedRunningTime="2026-02-27 16:44:17.380807624 +0000 UTC m=+1219.527822071" Feb 27 16:44:17 crc kubenswrapper[4751]: I0227 16:44:17.719066 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-55b5ff4dbb-tcndr" event={"ID":"91067468-8654-4bfd-b921-15679cf507c9","Type":"ContainerStarted","Data":"a247512bcd7fbb2cb068154291123f1efb427923d820312d004d780462436750"} Feb 27 16:44:17 crc kubenswrapper[4751]: I0227 16:44:17.720195 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-55b5ff4dbb-tcndr" Feb 27 16:44:17 crc kubenswrapper[4751]: I0227 16:44:17.722152 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-cf99c678f-cft6c" event={"ID":"648389a6-8f01-4a6e-916e-c3b567817015","Type":"ContainerStarted","Data":"b0d28f57d77f49d005a4e4894672113bb2afa4cae444986fa8ae1ba795b60ae5"} Feb 27 16:44:17 crc kubenswrapper[4751]: I0227 16:44:17.725350 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536844-qtl4v" event={"ID":"78c6e077-9027-4c5f-a858-6b0b3328682a","Type":"ContainerStarted","Data":"23b962a77f710e938d8fdccbba41627ac2c63f6590f398c64a36726e1a306a60"} Feb 27 16:44:17 crc kubenswrapper[4751]: I0227 16:44:17.731203 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kv5gk" event={"ID":"d570bae3-0595-480f-bebc-80d86a0618d3","Type":"ContainerStarted","Data":"cacd318cc9c514e14739c1a8abe1f6194c17f6aea898dac2ba16292339a71715"} Feb 27 16:44:17 crc kubenswrapper[4751]: I0227 16:44:17.745867 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-55b5ff4dbb-tcndr" podStartSLOduration=4.510496195 podStartE2EDuration="26.745840784s" podCreationTimestamp="2026-02-27 16:43:51 +0000 UTC" firstStartedPulling="2026-02-27 16:43:53.287271638 +0000 UTC m=+1195.434286085" lastFinishedPulling="2026-02-27 16:44:15.522616227 +0000 UTC m=+1217.669630674" observedRunningTime="2026-02-27 16:44:17.739856104 +0000 UTC m=+1219.886870551" watchObservedRunningTime="2026-02-27 16:44:17.745840784 +0000 UTC 
m=+1219.892855231" Feb 27 16:44:17 crc kubenswrapper[4751]: I0227 16:44:17.764862 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kv5gk" podStartSLOduration=4.4408121959999995 podStartE2EDuration="26.7648292s" podCreationTimestamp="2026-02-27 16:43:51 +0000 UTC" firstStartedPulling="2026-02-27 16:43:53.28998381 +0000 UTC m=+1195.436998257" lastFinishedPulling="2026-02-27 16:44:15.614000814 +0000 UTC m=+1217.761015261" observedRunningTime="2026-02-27 16:44:17.761518092 +0000 UTC m=+1219.908532539" watchObservedRunningTime="2026-02-27 16:44:17.7648292 +0000 UTC m=+1219.911843647" Feb 27 16:44:18 crc kubenswrapper[4751]: I0227 16:44:18.740913 4751 generic.go:334] "Generic (PLEG): container finished" podID="78c6e077-9027-4c5f-a858-6b0b3328682a" containerID="23b962a77f710e938d8fdccbba41627ac2c63f6590f398c64a36726e1a306a60" exitCode=0 Feb 27 16:44:18 crc kubenswrapper[4751]: I0227 16:44:18.740996 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536844-qtl4v" event={"ID":"78c6e077-9027-4c5f-a858-6b0b3328682a","Type":"ContainerDied","Data":"23b962a77f710e938d8fdccbba41627ac2c63f6590f398c64a36726e1a306a60"} Feb 27 16:44:20 crc kubenswrapper[4751]: I0227 16:44:20.075988 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536844-qtl4v" Feb 27 16:44:20 crc kubenswrapper[4751]: I0227 16:44:20.170538 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bvzjv\" (UniqueName: \"kubernetes.io/projected/78c6e077-9027-4c5f-a858-6b0b3328682a-kube-api-access-bvzjv\") pod \"78c6e077-9027-4c5f-a858-6b0b3328682a\" (UID: \"78c6e077-9027-4c5f-a858-6b0b3328682a\") " Feb 27 16:44:20 crc kubenswrapper[4751]: I0227 16:44:20.180317 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78c6e077-9027-4c5f-a858-6b0b3328682a-kube-api-access-bvzjv" (OuterVolumeSpecName: "kube-api-access-bvzjv") pod "78c6e077-9027-4c5f-a858-6b0b3328682a" (UID: "78c6e077-9027-4c5f-a858-6b0b3328682a"). InnerVolumeSpecName "kube-api-access-bvzjv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:44:20 crc kubenswrapper[4751]: I0227 16:44:20.272627 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bvzjv\" (UniqueName: \"kubernetes.io/projected/78c6e077-9027-4c5f-a858-6b0b3328682a-kube-api-access-bvzjv\") on node \"crc\" DevicePath \"\"" Feb 27 16:44:20 crc kubenswrapper[4751]: I0227 16:44:20.312233 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536838-qbq2z"] Feb 27 16:44:20 crc kubenswrapper[4751]: I0227 16:44:20.318534 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536838-qbq2z"] Feb 27 16:44:20 crc kubenswrapper[4751]: I0227 16:44:20.529074 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32fbc6ca-5072-482c-bd83-c773f3add3fa" path="/var/lib/kubelet/pods/32fbc6ca-5072-482c-bd83-c773f3add3fa/volumes" Feb 27 16:44:20 crc kubenswrapper[4751]: I0227 16:44:20.762198 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536844-qtl4v" event={"ID":"78c6e077-9027-4c5f-a858-6b0b3328682a","Type":"ContainerDied","Data":"da4d2b7af5605f0784ab39bb0d5494c8301a03e8bd2cf7a3794917186ca0a187"} Feb 27 16:44:20 crc kubenswrapper[4751]: I0227 16:44:20.762239 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="da4d2b7af5605f0784ab39bb0d5494c8301a03e8bd2cf7a3794917186ca0a187" Feb 27 16:44:20 crc kubenswrapper[4751]: I0227 16:44:20.762261 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536844-qtl4v" Feb 27 16:44:21 crc kubenswrapper[4751]: I0227 16:44:21.082024 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-6db6876945-d2xqb" Feb 27 16:44:21 crc kubenswrapper[4751]: I0227 16:44:21.092242 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-55d77d7b5c-6nxxw" Feb 27 16:44:21 crc kubenswrapper[4751]: I0227 16:44:21.108198 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-5d87c9d997-t6cx4" Feb 27 16:44:21 crc kubenswrapper[4751]: I0227 16:44:21.229570 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-cf99c678f-cft6c" Feb 27 16:44:21 crc kubenswrapper[4751]: I0227 16:44:21.380877 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-545456dc4-9vtcw" Feb 27 16:44:21 crc kubenswrapper[4751]: I0227 16:44:21.404634 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7c789f89c6-8x8p7" Feb 27 16:44:21 crc kubenswrapper[4751]: I0227 16:44:21.448221 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-64db6967f8-qgwmb" Feb 27 16:44:21 crc kubenswrapper[4751]: I0227 16:44:21.507365 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-67d996989d-wpjmf" Feb 27 16:44:21 crc kubenswrapper[4751]: I0227 16:44:21.522782 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/mariadb-operator-controller-manager-7b6bfb6475-jlzhk" Feb 27 16:44:21 crc kubenswrapper[4751]: I0227 16:44:21.544924 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-54688575f-gltv4" Feb 27 16:44:21 crc kubenswrapper[4751]: I0227 16:44:21.574721 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-78bc7f9bd9-vtnnn" Feb 27 16:44:21 crc kubenswrapper[4751]: I0227 16:44:21.575769 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-75684d597f-vzmw2" Feb 27 16:44:21 crc kubenswrapper[4751]: I0227 16:44:21.597761 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-648564c9fc-qbhxg" Feb 27 16:44:21 crc kubenswrapper[4751]: I0227 16:44:21.641514 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-9b9ff9f4d-4zzmq" Feb 27 16:44:21 crc kubenswrapper[4751]: I0227 16:44:21.722116 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-55b5ff4dbb-tcndr" Feb 27 16:44:21 crc kubenswrapper[4751]: I0227 16:44:21.740961 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-5d86c7ddb7-p4qmg" Feb 27 16:44:21 crc kubenswrapper[4751]: I0227 16:44:21.968858 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-5fdb694969-w8rl5" Feb 27 16:44:22 crc kubenswrapper[4751]: I0227 16:44:22.048764 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-bccc79885-6bqxl" Feb 27 16:44:22 crc kubenswrapper[4751]: I0227 16:44:22.523092 4751 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 27 16:44:23 crc kubenswrapper[4751]: I0227 16:44:23.021303 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/367c4281-7780-478a-ae73-263cf73aa15e-cert\") pod \"infra-operator-controller-manager-f7fcc58b9-74jk2\" (UID: \"367c4281-7780-478a-ae73-263cf73aa15e\") " pod="openstack-operators/infra-operator-controller-manager-f7fcc58b9-74jk2" Feb 27 16:44:23 crc kubenswrapper[4751]: I0227 16:44:23.031138 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/367c4281-7780-478a-ae73-263cf73aa15e-cert\") pod \"infra-operator-controller-manager-f7fcc58b9-74jk2\" (UID: \"367c4281-7780-478a-ae73-263cf73aa15e\") " pod="openstack-operators/infra-operator-controller-manager-f7fcc58b9-74jk2" Feb 27 16:44:23 crc kubenswrapper[4751]: I0227 16:44:23.086543 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-79d2k" Feb 27 16:44:23 crc kubenswrapper[4751]: I0227 16:44:23.094166 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-f7fcc58b9-74jk2" Feb 27 16:44:23 crc kubenswrapper[4751]: I0227 16:44:23.225842 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b4054232-d1c6-469a-ab62-3bc130b5535b-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv\" (UID: \"b4054232-d1c6-469a-ab62-3bc130b5535b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" Feb 27 16:44:23 crc kubenswrapper[4751]: I0227 16:44:23.233206 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b4054232-d1c6-469a-ab62-3bc130b5535b-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv\" (UID: \"b4054232-d1c6-469a-ab62-3bc130b5535b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" Feb 27 16:44:23 crc kubenswrapper[4751]: I0227 16:44:23.345105 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-ntsbp" Feb 27 16:44:23 crc kubenswrapper[4751]: I0227 16:44:23.353836 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" Feb 27 16:44:23 crc kubenswrapper[4751]: I0227 16:44:23.531960 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-webhook-certs\") pod \"openstack-operator-controller-manager-789bbcd94f-grbwc\" (UID: \"0b8c1cf8-d3b0-4220-bbc5-81ccf3830782\") " pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:44:23 crc kubenswrapper[4751]: I0227 16:44:23.532446 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-metrics-certs\") pod \"openstack-operator-controller-manager-789bbcd94f-grbwc\" (UID: \"0b8c1cf8-d3b0-4220-bbc5-81ccf3830782\") " pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:44:23 crc kubenswrapper[4751]: I0227 16:44:23.537173 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-metrics-certs\") pod \"openstack-operator-controller-manager-789bbcd94f-grbwc\" (UID: \"0b8c1cf8-d3b0-4220-bbc5-81ccf3830782\") " pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:44:23 crc kubenswrapper[4751]: I0227 16:44:23.537509 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0b8c1cf8-d3b0-4220-bbc5-81ccf3830782-webhook-certs\") pod \"openstack-operator-controller-manager-789bbcd94f-grbwc\" (UID: \"0b8c1cf8-d3b0-4220-bbc5-81ccf3830782\") " pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:44:23 crc kubenswrapper[4751]: I0227 16:44:23.594924 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-f7fcc58b9-74jk2"] Feb 27 16:44:23 crc kubenswrapper[4751]: I0227 16:44:23.784276 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv"] Feb 27 16:44:23 crc kubenswrapper[4751]: W0227 16:44:23.787763 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb4054232_d1c6_469a_ab62_3bc130b5535b.slice/crio-a0d84e8f74e6a1ef56bd88104e26c77a0bb5e7dfd3df2562c1c46409ef8cd720 WatchSource:0}: Error finding container a0d84e8f74e6a1ef56bd88104e26c77a0bb5e7dfd3df2562c1c46409ef8cd720: Status 404 returned error can't find the container with id a0d84e8f74e6a1ef56bd88104e26c77a0bb5e7dfd3df2562c1c46409ef8cd720 Feb 27 16:44:23 crc kubenswrapper[4751]: I0227 16:44:23.790101 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-xsblp" Feb 27 16:44:23 crc kubenswrapper[4751]: I0227 16:44:23.792139 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-74b6b5dc96-dqshb" event={"ID":"eac3a3e7-7a56-4774-8a2f-2f6998e678c1","Type":"ContainerStarted","Data":"3ac45e172774990e908470105a238d7a3daaccb9cf83c7a519947e48d15819e4"} Feb 27 16:44:23 crc kubenswrapper[4751]: I0227 16:44:23.792455 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-74b6b5dc96-dqshb" Feb 27 16:44:23 crc kubenswrapper[4751]: I0227 16:44:23.793791 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-f7fcc58b9-74jk2" event={"ID":"367c4281-7780-478a-ae73-263cf73aa15e","Type":"ContainerStarted","Data":"0f99376b9adc11999650b97677c033f80fab82100c3a3c86e477257b392fc3b9"} Feb 27 16:44:23 crc kubenswrapper[4751]: I0227 16:44:23.797814 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:44:23 crc kubenswrapper[4751]: I0227 16:44:23.811531 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-74b6b5dc96-dqshb" podStartSLOduration=3.7596884409999998 podStartE2EDuration="33.811506025s" podCreationTimestamp="2026-02-27 16:43:50 +0000 UTC" firstStartedPulling="2026-02-27 16:43:53.012436431 +0000 UTC m=+1195.159450878" lastFinishedPulling="2026-02-27 16:44:23.064254015 +0000 UTC m=+1225.211268462" observedRunningTime="2026-02-27 16:44:23.807315604 +0000 UTC m=+1225.954330051" watchObservedRunningTime="2026-02-27 16:44:23.811506025 +0000 UTC m=+1225.958520482" Feb 27 16:44:24 crc kubenswrapper[4751]: I0227 16:44:24.027066 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc"] Feb 27 16:44:24 crc kubenswrapper[4751]: I0227 16:44:24.806014 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" event={"ID":"b4054232-d1c6-469a-ab62-3bc130b5535b","Type":"ContainerStarted","Data":"a0d84e8f74e6a1ef56bd88104e26c77a0bb5e7dfd3df2562c1c46409ef8cd720"} Feb 27 16:44:24 crc kubenswrapper[4751]: I0227 16:44:24.809507 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" event={"ID":"0b8c1cf8-d3b0-4220-bbc5-81ccf3830782","Type":"ContainerStarted","Data":"1f86bfa5e7ea8fe8fa8ebcd49fea517b22ee5f2ac383c7346eb35d3dbda7c205"} Feb 27 16:44:24 crc kubenswrapper[4751]: I0227 16:44:24.809562 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" event={"ID":"0b8c1cf8-d3b0-4220-bbc5-81ccf3830782","Type":"ContainerStarted","Data":"044d397d6dff7163ceb61989a1b7ecd78abffbb5389342fe8f91cabdc123772d"} Feb 27 16:44:24 crc kubenswrapper[4751]: I0227 16:44:24.810109 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:44:24 crc kubenswrapper[4751]: I0227 16:44:24.839934 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" podStartSLOduration=33.839917871 podStartE2EDuration="33.839917871s" podCreationTimestamp="2026-02-27 16:43:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:44:24.836440849 +0000 UTC m=+1226.983455306" watchObservedRunningTime="2026-02-27 16:44:24.839917871 +0000 UTC m=+1226.986932318" Feb 27 16:44:24 crc kubenswrapper[4751]: I0227 16:44:24.991470 4751 scope.go:117] "RemoveContainer" containerID="61f234fc875974581fc093b56a1b026814efff2b98cc42afb332384821bd7487" Feb 27 16:44:31 crc kubenswrapper[4751]: I0227 16:44:31.571135 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-74b6b5dc96-dqshb" Feb 27 16:44:33 crc kubenswrapper[4751]: I0227 16:44:33.804013 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-789bbcd94f-grbwc" Feb 27 16:44:44 crc kubenswrapper[4751]: I0227 16:44:44.973969 4751 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" event={"ID":"b4054232-d1c6-469a-ab62-3bc130b5535b","Type":"ContainerStarted","Data":"a8930188f586b59b2bbfdc1969f421a2fae724e8f4f568b8912a1341493b077b"} Feb 27 16:44:44 crc kubenswrapper[4751]: I0227 16:44:44.974647 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" Feb 27 16:44:44 crc kubenswrapper[4751]: I0227 16:44:44.975802 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-f7fcc58b9-74jk2" event={"ID":"367c4281-7780-478a-ae73-263cf73aa15e","Type":"ContainerStarted","Data":"7b97a11a44cb67c518b731bee4a525e87203e7a297ea68077f5a8c4f0ee03a48"} Feb 27 16:44:44 crc kubenswrapper[4751]: I0227 16:44:44.975983 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-f7fcc58b9-74jk2" Feb 27 16:44:45 crc kubenswrapper[4751]: I0227 16:44:45.012939 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" podStartSLOduration=34.093060711 podStartE2EDuration="54.012923435s" podCreationTimestamp="2026-02-27 16:43:51 +0000 UTC" firstStartedPulling="2026-02-27 16:44:23.792093378 +0000 UTC m=+1225.939107825" lastFinishedPulling="2026-02-27 16:44:43.711956102 +0000 UTC m=+1245.858970549" observedRunningTime="2026-02-27 16:44:45.010180172 +0000 UTC m=+1247.157194629" watchObservedRunningTime="2026-02-27 16:44:45.012923435 +0000 UTC m=+1247.159937882" Feb 27 16:44:45 crc kubenswrapper[4751]: I0227 16:44:45.044639 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-f7fcc58b9-74jk2" podStartSLOduration=34.934418714 podStartE2EDuration="55.044580641s" podCreationTimestamp="2026-02-27 16:43:50 +0000 UTC" firstStartedPulling="2026-02-27 16:44:23.602170025 +0000 UTC m=+1225.749184472" lastFinishedPulling="2026-02-27 16:44:43.712331932 +0000 UTC m=+1245.859346399" observedRunningTime="2026-02-27 16:44:45.040565574 +0000 UTC m=+1247.187580041" watchObservedRunningTime="2026-02-27 16:44:45.044580641 +0000 UTC m=+1247.191595108" Feb 27 16:44:53 crc kubenswrapper[4751]: I0227 16:44:53.107009 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-f7fcc58b9-74jk2" Feb 27 16:44:53 crc kubenswrapper[4751]: I0227 16:44:53.362060 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv" Feb 27 16:45:00 crc kubenswrapper[4751]: I0227 16:45:00.174568 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536845-gfnmn"] Feb 27 16:45:00 crc kubenswrapper[4751]: E0227 16:45:00.175248 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78c6e077-9027-4c5f-a858-6b0b3328682a" containerName="oc" Feb 27 16:45:00 crc kubenswrapper[4751]: I0227 16:45:00.175264 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="78c6e077-9027-4c5f-a858-6b0b3328682a" containerName="oc" Feb 27 16:45:00 crc kubenswrapper[4751]: I0227 16:45:00.175519 4751 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="78c6e077-9027-4c5f-a858-6b0b3328682a" containerName="oc" Feb 27 16:45:00 crc kubenswrapper[4751]: I0227 16:45:00.176107 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536845-gfnmn" Feb 27 16:45:00 crc kubenswrapper[4751]: I0227 16:45:00.178890 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 27 16:45:00 crc kubenswrapper[4751]: I0227 16:45:00.179113 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 27 16:45:00 crc kubenswrapper[4751]: I0227 16:45:00.190491 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536845-gfnmn"] Feb 27 16:45:00 crc kubenswrapper[4751]: I0227 16:45:00.347652 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2ltl\" (UniqueName: \"kubernetes.io/projected/68ad3b27-f8c4-44fe-8c10-215cf6821391-kube-api-access-r2ltl\") pod \"collect-profiles-29536845-gfnmn\" (UID: \"68ad3b27-f8c4-44fe-8c10-215cf6821391\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536845-gfnmn" Feb 27 16:45:00 crc kubenswrapper[4751]: I0227 16:45:00.347722 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/68ad3b27-f8c4-44fe-8c10-215cf6821391-config-volume\") pod \"collect-profiles-29536845-gfnmn\" (UID: \"68ad3b27-f8c4-44fe-8c10-215cf6821391\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536845-gfnmn" Feb 27 16:45:00 crc kubenswrapper[4751]: I0227 16:45:00.347824 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/68ad3b27-f8c4-44fe-8c10-215cf6821391-secret-volume\") pod \"collect-profiles-29536845-gfnmn\" (UID: \"68ad3b27-f8c4-44fe-8c10-215cf6821391\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536845-gfnmn" Feb 27 16:45:00 crc kubenswrapper[4751]: I0227 16:45:00.449702 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/68ad3b27-f8c4-44fe-8c10-215cf6821391-secret-volume\") pod \"collect-profiles-29536845-gfnmn\" (UID: \"68ad3b27-f8c4-44fe-8c10-215cf6821391\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536845-gfnmn" Feb 27 16:45:00 crc kubenswrapper[4751]: I0227 16:45:00.449818 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2ltl\" (UniqueName: \"kubernetes.io/projected/68ad3b27-f8c4-44fe-8c10-215cf6821391-kube-api-access-r2ltl\") pod \"collect-profiles-29536845-gfnmn\" (UID: \"68ad3b27-f8c4-44fe-8c10-215cf6821391\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536845-gfnmn" Feb 27 16:45:00 crc kubenswrapper[4751]: I0227 16:45:00.449865 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/68ad3b27-f8c4-44fe-8c10-215cf6821391-config-volume\") pod \"collect-profiles-29536845-gfnmn\" (UID: \"68ad3b27-f8c4-44fe-8c10-215cf6821391\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536845-gfnmn" Feb 27 16:45:00 crc kubenswrapper[4751]: I0227 16:45:00.450991 4751 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/68ad3b27-f8c4-44fe-8c10-215cf6821391-config-volume\") pod \"collect-profiles-29536845-gfnmn\" (UID: \"68ad3b27-f8c4-44fe-8c10-215cf6821391\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536845-gfnmn" Feb 27 16:45:00 crc kubenswrapper[4751]: I0227 16:45:00.455988 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/68ad3b27-f8c4-44fe-8c10-215cf6821391-secret-volume\") pod \"collect-profiles-29536845-gfnmn\" (UID: \"68ad3b27-f8c4-44fe-8c10-215cf6821391\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536845-gfnmn" Feb 27 16:45:00 crc kubenswrapper[4751]: I0227 16:45:00.472894 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2ltl\" (UniqueName: \"kubernetes.io/projected/68ad3b27-f8c4-44fe-8c10-215cf6821391-kube-api-access-r2ltl\") pod \"collect-profiles-29536845-gfnmn\" (UID: \"68ad3b27-f8c4-44fe-8c10-215cf6821391\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536845-gfnmn" Feb 27 16:45:00 crc kubenswrapper[4751]: I0227 16:45:00.514338 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536845-gfnmn" Feb 27 16:45:00 crc kubenswrapper[4751]: W0227 16:45:00.928374 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod68ad3b27_f8c4_44fe_8c10_215cf6821391.slice/crio-2722d9292fd2cc8138c1c88700de79d6f0cf2d4dad1c8073ae301e66d3687e11 WatchSource:0}: Error finding container 2722d9292fd2cc8138c1c88700de79d6f0cf2d4dad1c8073ae301e66d3687e11: Status 404 returned error can't find the container with id 2722d9292fd2cc8138c1c88700de79d6f0cf2d4dad1c8073ae301e66d3687e11 Feb 27 16:45:00 crc kubenswrapper[4751]: I0227 16:45:00.935309 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536845-gfnmn"] Feb 27 16:45:01 crc kubenswrapper[4751]: I0227 16:45:01.122123 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536845-gfnmn" event={"ID":"68ad3b27-f8c4-44fe-8c10-215cf6821391","Type":"ContainerStarted","Data":"2722d9292fd2cc8138c1c88700de79d6f0cf2d4dad1c8073ae301e66d3687e11"} Feb 27 16:45:02 crc kubenswrapper[4751]: I0227 16:45:02.134871 4751 generic.go:334] "Generic (PLEG): container finished" podID="68ad3b27-f8c4-44fe-8c10-215cf6821391" containerID="98b9ffa2c36f40f3acfbd70378c91f00c9a0241141446b73950c445770355e21" exitCode=0 Feb 27 16:45:02 crc kubenswrapper[4751]: I0227 16:45:02.134944 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536845-gfnmn" event={"ID":"68ad3b27-f8c4-44fe-8c10-215cf6821391","Type":"ContainerDied","Data":"98b9ffa2c36f40f3acfbd70378c91f00c9a0241141446b73950c445770355e21"} Feb 27 16:45:03 crc kubenswrapper[4751]: I0227 16:45:03.448927 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536845-gfnmn" Feb 27 16:45:03 crc kubenswrapper[4751]: I0227 16:45:03.597557 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r2ltl\" (UniqueName: \"kubernetes.io/projected/68ad3b27-f8c4-44fe-8c10-215cf6821391-kube-api-access-r2ltl\") pod \"68ad3b27-f8c4-44fe-8c10-215cf6821391\" (UID: \"68ad3b27-f8c4-44fe-8c10-215cf6821391\") " Feb 27 16:45:03 crc kubenswrapper[4751]: I0227 16:45:03.597602 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/68ad3b27-f8c4-44fe-8c10-215cf6821391-secret-volume\") pod \"68ad3b27-f8c4-44fe-8c10-215cf6821391\" (UID: \"68ad3b27-f8c4-44fe-8c10-215cf6821391\") " Feb 27 16:45:03 crc kubenswrapper[4751]: I0227 16:45:03.597747 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/68ad3b27-f8c4-44fe-8c10-215cf6821391-config-volume\") pod \"68ad3b27-f8c4-44fe-8c10-215cf6821391\" (UID: \"68ad3b27-f8c4-44fe-8c10-215cf6821391\") " Feb 27 16:45:03 crc kubenswrapper[4751]: I0227 16:45:03.598470 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/68ad3b27-f8c4-44fe-8c10-215cf6821391-config-volume" (OuterVolumeSpecName: "config-volume") pod "68ad3b27-f8c4-44fe-8c10-215cf6821391" (UID: "68ad3b27-f8c4-44fe-8c10-215cf6821391"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:45:03 crc kubenswrapper[4751]: I0227 16:45:03.602265 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68ad3b27-f8c4-44fe-8c10-215cf6821391-kube-api-access-r2ltl" (OuterVolumeSpecName: "kube-api-access-r2ltl") pod "68ad3b27-f8c4-44fe-8c10-215cf6821391" (UID: "68ad3b27-f8c4-44fe-8c10-215cf6821391"). InnerVolumeSpecName "kube-api-access-r2ltl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:45:03 crc kubenswrapper[4751]: I0227 16:45:03.610549 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/68ad3b27-f8c4-44fe-8c10-215cf6821391-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "68ad3b27-f8c4-44fe-8c10-215cf6821391" (UID: "68ad3b27-f8c4-44fe-8c10-215cf6821391"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:45:03 crc kubenswrapper[4751]: I0227 16:45:03.699342 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r2ltl\" (UniqueName: \"kubernetes.io/projected/68ad3b27-f8c4-44fe-8c10-215cf6821391-kube-api-access-r2ltl\") on node \"crc\" DevicePath \"\"" Feb 27 16:45:03 crc kubenswrapper[4751]: I0227 16:45:03.699381 4751 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/68ad3b27-f8c4-44fe-8c10-215cf6821391-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 27 16:45:03 crc kubenswrapper[4751]: I0227 16:45:03.699393 4751 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/68ad3b27-f8c4-44fe-8c10-215cf6821391-config-volume\") on node \"crc\" DevicePath \"\"" Feb 27 16:45:04 crc kubenswrapper[4751]: I0227 16:45:04.148817 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536845-gfnmn" event={"ID":"68ad3b27-f8c4-44fe-8c10-215cf6821391","Type":"ContainerDied","Data":"2722d9292fd2cc8138c1c88700de79d6f0cf2d4dad1c8073ae301e66d3687e11"} Feb 27 16:45:04 crc kubenswrapper[4751]: I0227 16:45:04.148860 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2722d9292fd2cc8138c1c88700de79d6f0cf2d4dad1c8073ae301e66d3687e11" Feb 27 16:45:04 crc kubenswrapper[4751]: I0227 16:45:04.148863 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536845-gfnmn" Feb 27 16:45:07 crc kubenswrapper[4751]: I0227 16:45:07.799835 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4lcvw"] Feb 27 16:45:07 crc kubenswrapper[4751]: E0227 16:45:07.801145 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68ad3b27-f8c4-44fe-8c10-215cf6821391" containerName="collect-profiles" Feb 27 16:45:07 crc kubenswrapper[4751]: I0227 16:45:07.801161 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="68ad3b27-f8c4-44fe-8c10-215cf6821391" containerName="collect-profiles" Feb 27 16:45:07 crc kubenswrapper[4751]: I0227 16:45:07.801327 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="68ad3b27-f8c4-44fe-8c10-215cf6821391" containerName="collect-profiles" Feb 27 16:45:07 crc kubenswrapper[4751]: I0227 16:45:07.802220 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-4lcvw" Feb 27 16:45:07 crc kubenswrapper[4751]: I0227 16:45:07.804741 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-rrqjl" Feb 27 16:45:07 crc kubenswrapper[4751]: I0227 16:45:07.806447 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Feb 27 16:45:07 crc kubenswrapper[4751]: I0227 16:45:07.806889 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Feb 27 16:45:07 crc kubenswrapper[4751]: I0227 16:45:07.808783 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Feb 27 16:45:07 crc kubenswrapper[4751]: I0227 16:45:07.856699 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4lcvw"] Feb 27 16:45:07 crc kubenswrapper[4751]: I0227 16:45:07.897558 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-ct7qm"] Feb 27 16:45:07 crc kubenswrapper[4751]: I0227 16:45:07.899182 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-ct7qm" Feb 27 16:45:07 crc kubenswrapper[4751]: I0227 16:45:07.901954 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Feb 27 16:45:07 crc kubenswrapper[4751]: I0227 16:45:07.910280 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-ct7qm"] Feb 27 16:45:07 crc kubenswrapper[4751]: I0227 16:45:07.966469 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rr5mr\" (UniqueName: \"kubernetes.io/projected/82f6fa3d-4fd7-422a-9a43-a27bf97ab447-kube-api-access-rr5mr\") pod \"dnsmasq-dns-675f4bcbfc-4lcvw\" (UID: \"82f6fa3d-4fd7-422a-9a43-a27bf97ab447\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4lcvw" Feb 27 16:45:07 crc kubenswrapper[4751]: I0227 16:45:07.966535 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82f6fa3d-4fd7-422a-9a43-a27bf97ab447-config\") pod \"dnsmasq-dns-675f4bcbfc-4lcvw\" (UID: \"82f6fa3d-4fd7-422a-9a43-a27bf97ab447\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4lcvw" Feb 27 16:45:08 crc kubenswrapper[4751]: I0227 16:45:08.067933 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ck27\" (UniqueName: \"kubernetes.io/projected/6d791fba-abfa-4f84-af28-a75fa6596882-kube-api-access-8ck27\") pod \"dnsmasq-dns-78dd6ddcc-ct7qm\" (UID: \"6d791fba-abfa-4f84-af28-a75fa6596882\") " pod="openstack/dnsmasq-dns-78dd6ddcc-ct7qm" Feb 27 16:45:08 crc kubenswrapper[4751]: I0227 16:45:08.068002 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82f6fa3d-4fd7-422a-9a43-a27bf97ab447-config\") pod \"dnsmasq-dns-675f4bcbfc-4lcvw\" (UID: \"82f6fa3d-4fd7-422a-9a43-a27bf97ab447\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4lcvw" Feb 27 16:45:08 crc kubenswrapper[4751]: I0227 16:45:08.068164 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d791fba-abfa-4f84-af28-a75fa6596882-config\") pod \"dnsmasq-dns-78dd6ddcc-ct7qm\" (UID: \"6d791fba-abfa-4f84-af28-a75fa6596882\") " pod="openstack/dnsmasq-dns-78dd6ddcc-ct7qm" Feb 
27 16:45:08 crc kubenswrapper[4751]: I0227 16:45:08.068200 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d791fba-abfa-4f84-af28-a75fa6596882-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-ct7qm\" (UID: \"6d791fba-abfa-4f84-af28-a75fa6596882\") " pod="openstack/dnsmasq-dns-78dd6ddcc-ct7qm" Feb 27 16:45:08 crc kubenswrapper[4751]: I0227 16:45:08.068439 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rr5mr\" (UniqueName: \"kubernetes.io/projected/82f6fa3d-4fd7-422a-9a43-a27bf97ab447-kube-api-access-rr5mr\") pod \"dnsmasq-dns-675f4bcbfc-4lcvw\" (UID: \"82f6fa3d-4fd7-422a-9a43-a27bf97ab447\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4lcvw" Feb 27 16:45:08 crc kubenswrapper[4751]: I0227 16:45:08.069106 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82f6fa3d-4fd7-422a-9a43-a27bf97ab447-config\") pod \"dnsmasq-dns-675f4bcbfc-4lcvw\" (UID: \"82f6fa3d-4fd7-422a-9a43-a27bf97ab447\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4lcvw" Feb 27 16:45:08 crc kubenswrapper[4751]: I0227 16:45:08.091473 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rr5mr\" (UniqueName: \"kubernetes.io/projected/82f6fa3d-4fd7-422a-9a43-a27bf97ab447-kube-api-access-rr5mr\") pod \"dnsmasq-dns-675f4bcbfc-4lcvw\" (UID: \"82f6fa3d-4fd7-422a-9a43-a27bf97ab447\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4lcvw" Feb 27 16:45:08 crc kubenswrapper[4751]: I0227 16:45:08.130150 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-4lcvw" Feb 27 16:45:08 crc kubenswrapper[4751]: I0227 16:45:08.169864 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ck27\" (UniqueName: \"kubernetes.io/projected/6d791fba-abfa-4f84-af28-a75fa6596882-kube-api-access-8ck27\") pod \"dnsmasq-dns-78dd6ddcc-ct7qm\" (UID: \"6d791fba-abfa-4f84-af28-a75fa6596882\") " pod="openstack/dnsmasq-dns-78dd6ddcc-ct7qm" Feb 27 16:45:08 crc kubenswrapper[4751]: I0227 16:45:08.169950 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d791fba-abfa-4f84-af28-a75fa6596882-config\") pod \"dnsmasq-dns-78dd6ddcc-ct7qm\" (UID: \"6d791fba-abfa-4f84-af28-a75fa6596882\") " pod="openstack/dnsmasq-dns-78dd6ddcc-ct7qm" Feb 27 16:45:08 crc kubenswrapper[4751]: I0227 16:45:08.169977 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d791fba-abfa-4f84-af28-a75fa6596882-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-ct7qm\" (UID: \"6d791fba-abfa-4f84-af28-a75fa6596882\") " pod="openstack/dnsmasq-dns-78dd6ddcc-ct7qm" Feb 27 16:45:08 crc kubenswrapper[4751]: I0227 16:45:08.171193 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d791fba-abfa-4f84-af28-a75fa6596882-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-ct7qm\" (UID: \"6d791fba-abfa-4f84-af28-a75fa6596882\") " pod="openstack/dnsmasq-dns-78dd6ddcc-ct7qm" Feb 27 16:45:08 crc kubenswrapper[4751]: I0227 16:45:08.171255 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d791fba-abfa-4f84-af28-a75fa6596882-config\") pod \"dnsmasq-dns-78dd6ddcc-ct7qm\" (UID: 
\"6d791fba-abfa-4f84-af28-a75fa6596882\") " pod="openstack/dnsmasq-dns-78dd6ddcc-ct7qm" Feb 27 16:45:08 crc kubenswrapper[4751]: I0227 16:45:08.195241 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ck27\" (UniqueName: \"kubernetes.io/projected/6d791fba-abfa-4f84-af28-a75fa6596882-kube-api-access-8ck27\") pod \"dnsmasq-dns-78dd6ddcc-ct7qm\" (UID: \"6d791fba-abfa-4f84-af28-a75fa6596882\") " pod="openstack/dnsmasq-dns-78dd6ddcc-ct7qm" Feb 27 16:45:08 crc kubenswrapper[4751]: I0227 16:45:08.219069 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-ct7qm" Feb 27 16:45:08 crc kubenswrapper[4751]: I0227 16:45:08.567160 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4lcvw"] Feb 27 16:45:08 crc kubenswrapper[4751]: I0227 16:45:08.662413 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-ct7qm"] Feb 27 16:45:08 crc kubenswrapper[4751]: W0227 16:45:08.670151 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d791fba_abfa_4f84_af28_a75fa6596882.slice/crio-2f3ea58bb05476286732895a43b017ad4fc87846eed1a4bf39c5433ccad7d8d5 WatchSource:0}: Error finding container 2f3ea58bb05476286732895a43b017ad4fc87846eed1a4bf39c5433ccad7d8d5: Status 404 returned error can't find the container with id 2f3ea58bb05476286732895a43b017ad4fc87846eed1a4bf39c5433ccad7d8d5 Feb 27 16:45:09 crc kubenswrapper[4751]: I0227 16:45:09.187270 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-ct7qm" event={"ID":"6d791fba-abfa-4f84-af28-a75fa6596882","Type":"ContainerStarted","Data":"2f3ea58bb05476286732895a43b017ad4fc87846eed1a4bf39c5433ccad7d8d5"} Feb 27 16:45:09 crc kubenswrapper[4751]: I0227 16:45:09.188999 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-4lcvw" event={"ID":"82f6fa3d-4fd7-422a-9a43-a27bf97ab447","Type":"ContainerStarted","Data":"bfe08ce847ca4ecf3445a3de40629334f42e31334f949c5e4c8afd04616819b1"} Feb 27 16:45:09 crc kubenswrapper[4751]: I0227 16:45:09.734162 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4lcvw"] Feb 27 16:45:09 crc kubenswrapper[4751]: I0227 16:45:09.753085 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-8jhlv"] Feb 27 16:45:09 crc kubenswrapper[4751]: I0227 16:45:09.754128 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-8jhlv" Feb 27 16:45:09 crc kubenswrapper[4751]: I0227 16:45:09.768296 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-8jhlv"] Feb 27 16:45:09 crc kubenswrapper[4751]: I0227 16:45:09.894823 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e505cef0-6aa5-4e35-ba8d-cb8797afd6e6-config\") pod \"dnsmasq-dns-666b6646f7-8jhlv\" (UID: \"e505cef0-6aa5-4e35-ba8d-cb8797afd6e6\") " pod="openstack/dnsmasq-dns-666b6646f7-8jhlv" Feb 27 16:45:09 crc kubenswrapper[4751]: I0227 16:45:09.894934 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e505cef0-6aa5-4e35-ba8d-cb8797afd6e6-dns-svc\") pod \"dnsmasq-dns-666b6646f7-8jhlv\" (UID: \"e505cef0-6aa5-4e35-ba8d-cb8797afd6e6\") " pod="openstack/dnsmasq-dns-666b6646f7-8jhlv" Feb 27 16:45:09 crc kubenswrapper[4751]: I0227 16:45:09.894970 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6l8q\" (UniqueName: \"kubernetes.io/projected/e505cef0-6aa5-4e35-ba8d-cb8797afd6e6-kube-api-access-p6l8q\") pod \"dnsmasq-dns-666b6646f7-8jhlv\" (UID: \"e505cef0-6aa5-4e35-ba8d-cb8797afd6e6\") " pod="openstack/dnsmasq-dns-666b6646f7-8jhlv" Feb 27 16:45:09 crc kubenswrapper[4751]: I0227 16:45:09.998873 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e505cef0-6aa5-4e35-ba8d-cb8797afd6e6-config\") pod \"dnsmasq-dns-666b6646f7-8jhlv\" (UID: \"e505cef0-6aa5-4e35-ba8d-cb8797afd6e6\") " pod="openstack/dnsmasq-dns-666b6646f7-8jhlv" Feb 27 16:45:09 crc kubenswrapper[4751]: I0227 16:45:09.999003 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e505cef0-6aa5-4e35-ba8d-cb8797afd6e6-dns-svc\") pod \"dnsmasq-dns-666b6646f7-8jhlv\" (UID: \"e505cef0-6aa5-4e35-ba8d-cb8797afd6e6\") " pod="openstack/dnsmasq-dns-666b6646f7-8jhlv" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.000120 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e505cef0-6aa5-4e35-ba8d-cb8797afd6e6-dns-svc\") pod \"dnsmasq-dns-666b6646f7-8jhlv\" (UID: \"e505cef0-6aa5-4e35-ba8d-cb8797afd6e6\") " pod="openstack/dnsmasq-dns-666b6646f7-8jhlv" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.000159 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e505cef0-6aa5-4e35-ba8d-cb8797afd6e6-config\") pod \"dnsmasq-dns-666b6646f7-8jhlv\" (UID: \"e505cef0-6aa5-4e35-ba8d-cb8797afd6e6\") " pod="openstack/dnsmasq-dns-666b6646f7-8jhlv" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.002881 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6l8q\" (UniqueName: \"kubernetes.io/projected/e505cef0-6aa5-4e35-ba8d-cb8797afd6e6-kube-api-access-p6l8q\") pod \"dnsmasq-dns-666b6646f7-8jhlv\" (UID: \"e505cef0-6aa5-4e35-ba8d-cb8797afd6e6\") " pod="openstack/dnsmasq-dns-666b6646f7-8jhlv" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.049721 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6l8q\" (UniqueName: 
\"kubernetes.io/projected/e505cef0-6aa5-4e35-ba8d-cb8797afd6e6-kube-api-access-p6l8q\") pod \"dnsmasq-dns-666b6646f7-8jhlv\" (UID: \"e505cef0-6aa5-4e35-ba8d-cb8797afd6e6\") " pod="openstack/dnsmasq-dns-666b6646f7-8jhlv" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.071367 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-8jhlv" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.560634 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-8jhlv"] Feb 27 16:45:10 crc kubenswrapper[4751]: W0227 16:45:10.589120 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode505cef0_6aa5_4e35_ba8d_cb8797afd6e6.slice/crio-6de54e6c71bbb4289ad37ca81ff2a5b0e74eadfa06d62e6581af8daa535ea63d WatchSource:0}: Error finding container 6de54e6c71bbb4289ad37ca81ff2a5b0e74eadfa06d62e6581af8daa535ea63d: Status 404 returned error can't find the container with id 6de54e6c71bbb4289ad37ca81ff2a5b0e74eadfa06d62e6581af8daa535ea63d Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.638756 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-ct7qm"] Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.653612 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-vpgr9"] Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.656424 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-vpgr9" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.668968 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-vpgr9"] Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.722639 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4dd2754d-ed93-4494-ae20-b38295ba9fff-config\") pod \"dnsmasq-dns-57d769cc4f-vpgr9\" (UID: \"4dd2754d-ed93-4494-ae20-b38295ba9fff\") " pod="openstack/dnsmasq-dns-57d769cc4f-vpgr9" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.722677 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dxpk\" (UniqueName: \"kubernetes.io/projected/4dd2754d-ed93-4494-ae20-b38295ba9fff-kube-api-access-9dxpk\") pod \"dnsmasq-dns-57d769cc4f-vpgr9\" (UID: \"4dd2754d-ed93-4494-ae20-b38295ba9fff\") " pod="openstack/dnsmasq-dns-57d769cc4f-vpgr9" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.723154 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4dd2754d-ed93-4494-ae20-b38295ba9fff-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-vpgr9\" (UID: \"4dd2754d-ed93-4494-ae20-b38295ba9fff\") " pod="openstack/dnsmasq-dns-57d769cc4f-vpgr9" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.824980 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dxpk\" (UniqueName: \"kubernetes.io/projected/4dd2754d-ed93-4494-ae20-b38295ba9fff-kube-api-access-9dxpk\") pod \"dnsmasq-dns-57d769cc4f-vpgr9\" (UID: \"4dd2754d-ed93-4494-ae20-b38295ba9fff\") " pod="openstack/dnsmasq-dns-57d769cc4f-vpgr9" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.825021 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/4dd2754d-ed93-4494-ae20-b38295ba9fff-config\") pod \"dnsmasq-dns-57d769cc4f-vpgr9\" (UID: \"4dd2754d-ed93-4494-ae20-b38295ba9fff\") " pod="openstack/dnsmasq-dns-57d769cc4f-vpgr9" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.825084 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4dd2754d-ed93-4494-ae20-b38295ba9fff-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-vpgr9\" (UID: \"4dd2754d-ed93-4494-ae20-b38295ba9fff\") " pod="openstack/dnsmasq-dns-57d769cc4f-vpgr9" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.826000 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4dd2754d-ed93-4494-ae20-b38295ba9fff-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-vpgr9\" (UID: \"4dd2754d-ed93-4494-ae20-b38295ba9fff\") " pod="openstack/dnsmasq-dns-57d769cc4f-vpgr9" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.826462 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4dd2754d-ed93-4494-ae20-b38295ba9fff-config\") pod \"dnsmasq-dns-57d769cc4f-vpgr9\" (UID: \"4dd2754d-ed93-4494-ae20-b38295ba9fff\") " pod="openstack/dnsmasq-dns-57d769cc4f-vpgr9" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.844647 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dxpk\" (UniqueName: \"kubernetes.io/projected/4dd2754d-ed93-4494-ae20-b38295ba9fff-kube-api-access-9dxpk\") pod \"dnsmasq-dns-57d769cc4f-vpgr9\" (UID: \"4dd2754d-ed93-4494-ae20-b38295ba9fff\") " pod="openstack/dnsmasq-dns-57d769cc4f-vpgr9" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.935762 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.937131 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.947512 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.950498 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.951253 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.951446 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-5sgl5" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.965917 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.966662 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.967341 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Feb 27 16:45:10 crc kubenswrapper[4751]: I0227 16:45:10.974044 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-vpgr9" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.021516 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.130931 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.130988 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-server-conf\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.131018 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.131041 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/51a81c6a-6814-412d-b77d-e741f1f74446-pod-info\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.131074 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sk5bp\" (UniqueName: \"kubernetes.io/projected/51a81c6a-6814-412d-b77d-e741f1f74446-kube-api-access-sk5bp\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.131096 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.131131 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-config-data\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.131150 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.131197 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.131228 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.131248 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/51a81c6a-6814-412d-b77d-e741f1f74446-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.226896 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-8jhlv" event={"ID":"e505cef0-6aa5-4e35-ba8d-cb8797afd6e6","Type":"ContainerStarted","Data":"6de54e6c71bbb4289ad37ca81ff2a5b0e74eadfa06d62e6581af8daa535ea63d"} Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.233352 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.233435 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.233462 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/51a81c6a-6814-412d-b77d-e741f1f74446-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.233537 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-server-conf\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.233563 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.233592 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.233613 4751 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/51a81c6a-6814-412d-b77d-e741f1f74446-pod-info\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.233652 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.233674 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sk5bp\" (UniqueName: \"kubernetes.io/projected/51a81c6a-6814-412d-b77d-e741f1f74446-kube-api-access-sk5bp\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.233713 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-config-data\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.233736 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.234125 4751 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.235481 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-server-conf\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.237011 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.237303 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.237366 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-plugins-conf\") pod \"rabbitmq-server-0\" (UID: 
\"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.238069 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-config-data\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.238713 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.268524 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.269749 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/51a81c6a-6814-412d-b77d-e741f1f74446-pod-info\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.269786 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/51a81c6a-6814-412d-b77d-e741f1f74446-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.270194 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.274162 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sk5bp\" (UniqueName: \"kubernetes.io/projected/51a81c6a-6814-412d-b77d-e741f1f74446-kube-api-access-sk5bp\") pod \"rabbitmq-server-0\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.357462 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.380512 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-vpgr9"] Feb 27 16:45:11 crc kubenswrapper[4751]: W0227 16:45:11.390189 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4dd2754d_ed93_4494_ae20_b38295ba9fff.slice/crio-2cce647053ed8757960371c5031267cfd78c5b0564bdfae1078d84853e72c0db WatchSource:0}: Error finding container 2cce647053ed8757960371c5031267cfd78c5b0564bdfae1078d84853e72c0db: Status 404 returned error can't find the container with id 2cce647053ed8757960371c5031267cfd78c5b0564bdfae1078d84853e72c0db Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.810944 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.815258 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.819751 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.820110 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.820657 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-jmptl" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.820878 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.821051 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.821191 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.821323 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.831900 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.849125 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.947896 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cecf602c-dec2-40c6-922c-bf84b707b1b9-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.947986 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.948046 4751 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.948099 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hxs5\" (UniqueName: \"kubernetes.io/projected/cecf602c-dec2-40c6-922c-bf84b707b1b9-kube-api-access-5hxs5\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.948122 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.948138 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.948178 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.948212 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.948264 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.948287 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cecf602c-dec2-40c6-922c-bf84b707b1b9-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:11 crc kubenswrapper[4751]: I0227 16:45:11.948325 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.050165 
4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.050220 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.050251 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hxs5\" (UniqueName: \"kubernetes.io/projected/cecf602c-dec2-40c6-922c-bf84b707b1b9-kube-api-access-5hxs5\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.050277 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.050302 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.050332 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.050378 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.050425 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.050452 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cecf602c-dec2-40c6-922c-bf84b707b1b9-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.050476 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: 
\"kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.050500 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cecf602c-dec2-40c6-922c-bf84b707b1b9-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.051816 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.051877 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.052535 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.052598 4751 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.053131 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.053536 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.056251 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.056530 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cecf602c-dec2-40c6-922c-bf84b707b1b9-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " 
pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.061215 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cecf602c-dec2-40c6-922c-bf84b707b1b9-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.067224 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.067982 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hxs5\" (UniqueName: \"kubernetes.io/projected/cecf602c-dec2-40c6-922c-bf84b707b1b9-kube-api-access-5hxs5\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.074229 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.139842 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.240240 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-vpgr9" event={"ID":"4dd2754d-ed93-4494-ae20-b38295ba9fff","Type":"ContainerStarted","Data":"2cce647053ed8757960371c5031267cfd78c5b0564bdfae1078d84853e72c0db"} Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.241334 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"51a81c6a-6814-412d-b77d-e741f1f74446","Type":"ContainerStarted","Data":"b47cb6ea07a0a4c6ecedec4f4e4353346f3ef593d4a975263dd6b2f1aa6f013e"} Feb 27 16:45:12 crc kubenswrapper[4751]: I0227 16:45:12.644239 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 27 16:45:12 crc kubenswrapper[4751]: W0227 16:45:12.654606 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcecf602c_dec2_40c6_922c_bf84b707b1b9.slice/crio-068368f05d8224f09170f56353afd8659611b4c1686490f92f6144bab5110b28 WatchSource:0}: Error finding container 068368f05d8224f09170f56353afd8659611b4c1686490f92f6144bab5110b28: Status 404 returned error can't find the container with id 068368f05d8224f09170f56353afd8659611b4c1686490f92f6144bab5110b28 Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.042677 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.044894 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.048643 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.048683 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.048742 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-ssvwq" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.048757 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.054223 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.059218 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.169066 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/253a763c-21da-4224-91a2-e3bdc6eca0e9-kolla-config\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.169145 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/253a763c-21da-4224-91a2-e3bdc6eca0e9-operator-scripts\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.169242 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/253a763c-21da-4224-91a2-e3bdc6eca0e9-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.169272 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qz55r\" (UniqueName: \"kubernetes.io/projected/253a763c-21da-4224-91a2-e3bdc6eca0e9-kube-api-access-qz55r\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.169304 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/253a763c-21da-4224-91a2-e3bdc6eca0e9-config-data-default\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.169329 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.169361 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/253a763c-21da-4224-91a2-e3bdc6eca0e9-config-data-generated\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.169427 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/253a763c-21da-4224-91a2-e3bdc6eca0e9-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.258571 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"cecf602c-dec2-40c6-922c-bf84b707b1b9","Type":"ContainerStarted","Data":"068368f05d8224f09170f56353afd8659611b4c1686490f92f6144bab5110b28"} Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.270305 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/253a763c-21da-4224-91a2-e3bdc6eca0e9-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.270346 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qz55r\" (UniqueName: \"kubernetes.io/projected/253a763c-21da-4224-91a2-e3bdc6eca0e9-kube-api-access-qz55r\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.270368 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/253a763c-21da-4224-91a2-e3bdc6eca0e9-config-data-default\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.270390 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.270427 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/253a763c-21da-4224-91a2-e3bdc6eca0e9-config-data-generated\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.270452 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/253a763c-21da-4224-91a2-e3bdc6eca0e9-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.270491 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/253a763c-21da-4224-91a2-e3bdc6eca0e9-kolla-config\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 
16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.270528 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/253a763c-21da-4224-91a2-e3bdc6eca0e9-operator-scripts\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.271147 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/253a763c-21da-4224-91a2-e3bdc6eca0e9-config-data-generated\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.271526 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/253a763c-21da-4224-91a2-e3bdc6eca0e9-config-data-default\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.271638 4751 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.271995 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/253a763c-21da-4224-91a2-e3bdc6eca0e9-kolla-config\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.271998 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/253a763c-21da-4224-91a2-e3bdc6eca0e9-operator-scripts\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.277104 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/253a763c-21da-4224-91a2-e3bdc6eca0e9-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.278293 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/253a763c-21da-4224-91a2-e3bdc6eca0e9-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.285220 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qz55r\" (UniqueName: \"kubernetes.io/projected/253a763c-21da-4224-91a2-e3bdc6eca0e9-kube-api-access-qz55r\") pod \"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.291714 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod 
\"openstack-galera-0\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " pod="openstack/openstack-galera-0" Feb 27 16:45:13 crc kubenswrapper[4751]: I0227 16:45:13.378851 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.466424 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.468430 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.470037 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-jrdpv" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.471361 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.471579 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.473702 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.493816 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.601176 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4d395a15-ded3-4216-a09e-85b0305c2225-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.601231 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4d395a15-ded3-4216-a09e-85b0305c2225-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.601295 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjtj8\" (UniqueName: \"kubernetes.io/projected/4d395a15-ded3-4216-a09e-85b0305c2225-kube-api-access-fjtj8\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.601341 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4d395a15-ded3-4216-a09e-85b0305c2225-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.601376 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d395a15-ded3-4216-a09e-85b0305c2225-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc 
kubenswrapper[4751]: I0227 16:45:14.601532 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d395a15-ded3-4216-a09e-85b0305c2225-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.601612 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d395a15-ded3-4216-a09e-85b0305c2225-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.601650 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.703124 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjtj8\" (UniqueName: \"kubernetes.io/projected/4d395a15-ded3-4216-a09e-85b0305c2225-kube-api-access-fjtj8\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.703207 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4d395a15-ded3-4216-a09e-85b0305c2225-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.703242 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d395a15-ded3-4216-a09e-85b0305c2225-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.703288 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d395a15-ded3-4216-a09e-85b0305c2225-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.703331 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d395a15-ded3-4216-a09e-85b0305c2225-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.703354 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.703468 4751 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4d395a15-ded3-4216-a09e-85b0305c2225-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.703497 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4d395a15-ded3-4216-a09e-85b0305c2225-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.704354 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4d395a15-ded3-4216-a09e-85b0305c2225-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.705271 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4d395a15-ded3-4216-a09e-85b0305c2225-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.706191 4751 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.706480 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4d395a15-ded3-4216-a09e-85b0305c2225-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.708500 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d395a15-ded3-4216-a09e-85b0305c2225-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.709193 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d395a15-ded3-4216-a09e-85b0305c2225-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.721553 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d395a15-ded3-4216-a09e-85b0305c2225-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.727210 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjtj8\" (UniqueName: 
\"kubernetes.io/projected/4d395a15-ded3-4216-a09e-85b0305c2225-kube-api-access-fjtj8\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.731359 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.796713 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.941238 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.942290 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.947468 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.947477 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-pshbf" Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.955108 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Feb 27 16:45:14 crc kubenswrapper[4751]: I0227 16:45:14.983798 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Feb 27 16:45:15 crc kubenswrapper[4751]: I0227 16:45:15.110361 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c3834ac-6796-485b-9dec-e45cebf976df-memcached-tls-certs\") pod \"memcached-0\" (UID: \"3c3834ac-6796-485b-9dec-e45cebf976df\") " pod="openstack/memcached-0" Feb 27 16:45:15 crc kubenswrapper[4751]: I0227 16:45:15.110441 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3c3834ac-6796-485b-9dec-e45cebf976df-config-data\") pod \"memcached-0\" (UID: \"3c3834ac-6796-485b-9dec-e45cebf976df\") " pod="openstack/memcached-0" Feb 27 16:45:15 crc kubenswrapper[4751]: I0227 16:45:15.110502 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c3834ac-6796-485b-9dec-e45cebf976df-combined-ca-bundle\") pod \"memcached-0\" (UID: \"3c3834ac-6796-485b-9dec-e45cebf976df\") " pod="openstack/memcached-0" Feb 27 16:45:15 crc kubenswrapper[4751]: I0227 16:45:15.110538 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xh4td\" (UniqueName: \"kubernetes.io/projected/3c3834ac-6796-485b-9dec-e45cebf976df-kube-api-access-xh4td\") pod \"memcached-0\" (UID: \"3c3834ac-6796-485b-9dec-e45cebf976df\") " pod="openstack/memcached-0" Feb 27 16:45:15 crc kubenswrapper[4751]: I0227 16:45:15.110592 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3c3834ac-6796-485b-9dec-e45cebf976df-kolla-config\") pod \"memcached-0\" (UID: 
\"3c3834ac-6796-485b-9dec-e45cebf976df\") " pod="openstack/memcached-0" Feb 27 16:45:15 crc kubenswrapper[4751]: I0227 16:45:15.212057 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c3834ac-6796-485b-9dec-e45cebf976df-memcached-tls-certs\") pod \"memcached-0\" (UID: \"3c3834ac-6796-485b-9dec-e45cebf976df\") " pod="openstack/memcached-0" Feb 27 16:45:15 crc kubenswrapper[4751]: I0227 16:45:15.212116 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3c3834ac-6796-485b-9dec-e45cebf976df-config-data\") pod \"memcached-0\" (UID: \"3c3834ac-6796-485b-9dec-e45cebf976df\") " pod="openstack/memcached-0" Feb 27 16:45:15 crc kubenswrapper[4751]: I0227 16:45:15.212174 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c3834ac-6796-485b-9dec-e45cebf976df-combined-ca-bundle\") pod \"memcached-0\" (UID: \"3c3834ac-6796-485b-9dec-e45cebf976df\") " pod="openstack/memcached-0" Feb 27 16:45:15 crc kubenswrapper[4751]: I0227 16:45:15.212208 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xh4td\" (UniqueName: \"kubernetes.io/projected/3c3834ac-6796-485b-9dec-e45cebf976df-kube-api-access-xh4td\") pod \"memcached-0\" (UID: \"3c3834ac-6796-485b-9dec-e45cebf976df\") " pod="openstack/memcached-0" Feb 27 16:45:15 crc kubenswrapper[4751]: I0227 16:45:15.212228 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3c3834ac-6796-485b-9dec-e45cebf976df-kolla-config\") pod \"memcached-0\" (UID: \"3c3834ac-6796-485b-9dec-e45cebf976df\") " pod="openstack/memcached-0" Feb 27 16:45:15 crc kubenswrapper[4751]: I0227 16:45:15.212927 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3c3834ac-6796-485b-9dec-e45cebf976df-kolla-config\") pod \"memcached-0\" (UID: \"3c3834ac-6796-485b-9dec-e45cebf976df\") " pod="openstack/memcached-0" Feb 27 16:45:15 crc kubenswrapper[4751]: I0227 16:45:15.214083 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3c3834ac-6796-485b-9dec-e45cebf976df-config-data\") pod \"memcached-0\" (UID: \"3c3834ac-6796-485b-9dec-e45cebf976df\") " pod="openstack/memcached-0" Feb 27 16:45:15 crc kubenswrapper[4751]: I0227 16:45:15.225153 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c3834ac-6796-485b-9dec-e45cebf976df-memcached-tls-certs\") pod \"memcached-0\" (UID: \"3c3834ac-6796-485b-9dec-e45cebf976df\") " pod="openstack/memcached-0" Feb 27 16:45:15 crc kubenswrapper[4751]: I0227 16:45:15.226140 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c3834ac-6796-485b-9dec-e45cebf976df-combined-ca-bundle\") pod \"memcached-0\" (UID: \"3c3834ac-6796-485b-9dec-e45cebf976df\") " pod="openstack/memcached-0" Feb 27 16:45:15 crc kubenswrapper[4751]: I0227 16:45:15.230331 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xh4td\" (UniqueName: \"kubernetes.io/projected/3c3834ac-6796-485b-9dec-e45cebf976df-kube-api-access-xh4td\") pod \"memcached-0\" (UID: 
\"3c3834ac-6796-485b-9dec-e45cebf976df\") " pod="openstack/memcached-0" Feb 27 16:45:15 crc kubenswrapper[4751]: I0227 16:45:15.295518 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Feb 27 16:45:16 crc kubenswrapper[4751]: I0227 16:45:16.928901 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Feb 27 16:45:16 crc kubenswrapper[4751]: I0227 16:45:16.930320 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 27 16:45:16 crc kubenswrapper[4751]: I0227 16:45:16.932305 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-d5nsl" Feb 27 16:45:16 crc kubenswrapper[4751]: I0227 16:45:16.939934 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 27 16:45:17 crc kubenswrapper[4751]: I0227 16:45:17.039528 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqdrk\" (UniqueName: \"kubernetes.io/projected/ad38c130-da58-4681-ac04-c017147fcc6e-kube-api-access-cqdrk\") pod \"kube-state-metrics-0\" (UID: \"ad38c130-da58-4681-ac04-c017147fcc6e\") " pod="openstack/kube-state-metrics-0" Feb 27 16:45:17 crc kubenswrapper[4751]: I0227 16:45:17.141491 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqdrk\" (UniqueName: \"kubernetes.io/projected/ad38c130-da58-4681-ac04-c017147fcc6e-kube-api-access-cqdrk\") pod \"kube-state-metrics-0\" (UID: \"ad38c130-da58-4681-ac04-c017147fcc6e\") " pod="openstack/kube-state-metrics-0" Feb 27 16:45:17 crc kubenswrapper[4751]: I0227 16:45:17.165586 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqdrk\" (UniqueName: \"kubernetes.io/projected/ad38c130-da58-4681-ac04-c017147fcc6e-kube-api-access-cqdrk\") pod \"kube-state-metrics-0\" (UID: \"ad38c130-da58-4681-ac04-c017147fcc6e\") " pod="openstack/kube-state-metrics-0" Feb 27 16:45:17 crc kubenswrapper[4751]: I0227 16:45:17.250087 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 27 16:45:20 crc kubenswrapper[4751]: I0227 16:45:20.742627 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Feb 27 16:45:20 crc kubenswrapper[4751]: I0227 16:45:20.750012 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:20 crc kubenswrapper[4751]: I0227 16:45:20.754123 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Feb 27 16:45:20 crc kubenswrapper[4751]: I0227 16:45:20.754333 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Feb 27 16:45:20 crc kubenswrapper[4751]: I0227 16:45:20.755035 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Feb 27 16:45:20 crc kubenswrapper[4751]: I0227 16:45:20.755243 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-rdqcw" Feb 27 16:45:20 crc kubenswrapper[4751]: I0227 16:45:20.755387 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Feb 27 16:45:20 crc kubenswrapper[4751]: I0227 16:45:20.764393 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Feb 27 16:45:20 crc kubenswrapper[4751]: I0227 16:45:20.908924 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/95b4a6cb-a957-4a31-8510-292eb1305ad6-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:20 crc kubenswrapper[4751]: I0227 16:45:20.908991 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:20 crc kubenswrapper[4751]: I0227 16:45:20.909023 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95b4a6cb-a957-4a31-8510-292eb1305ad6-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:20 crc kubenswrapper[4751]: I0227 16:45:20.909061 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-js87m\" (UniqueName: \"kubernetes.io/projected/95b4a6cb-a957-4a31-8510-292eb1305ad6-kube-api-access-js87m\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:20 crc kubenswrapper[4751]: I0227 16:45:20.909104 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/95b4a6cb-a957-4a31-8510-292eb1305ad6-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:20 crc kubenswrapper[4751]: I0227 16:45:20.909149 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/95b4a6cb-a957-4a31-8510-292eb1305ad6-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:20 crc kubenswrapper[4751]: I0227 16:45:20.909176 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/95b4a6cb-a957-4a31-8510-292eb1305ad6-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:20 crc kubenswrapper[4751]: I0227 16:45:20.909197 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95b4a6cb-a957-4a31-8510-292eb1305ad6-config\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.011315 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/95b4a6cb-a957-4a31-8510-292eb1305ad6-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.011360 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95b4a6cb-a957-4a31-8510-292eb1305ad6-config\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.011439 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/95b4a6cb-a957-4a31-8510-292eb1305ad6-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.011472 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.011508 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95b4a6cb-a957-4a31-8510-292eb1305ad6-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.011537 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-js87m\" (UniqueName: \"kubernetes.io/projected/95b4a6cb-a957-4a31-8510-292eb1305ad6-kube-api-access-js87m\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.011597 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/95b4a6cb-a957-4a31-8510-292eb1305ad6-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.011659 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/95b4a6cb-a957-4a31-8510-292eb1305ad6-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 
16:45:21.012206 4751 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.012261 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/95b4a6cb-a957-4a31-8510-292eb1305ad6-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.012499 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95b4a6cb-a957-4a31-8510-292eb1305ad6-config\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.012805 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/95b4a6cb-a957-4a31-8510-292eb1305ad6-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.018700 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/95b4a6cb-a957-4a31-8510-292eb1305ad6-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.021692 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/95b4a6cb-a957-4a31-8510-292eb1305ad6-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.028942 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-js87m\" (UniqueName: \"kubernetes.io/projected/95b4a6cb-a957-4a31-8510-292eb1305ad6-kube-api-access-js87m\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.028952 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95b4a6cb-a957-4a31-8510-292eb1305ad6-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.039666 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.092281 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.750894 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-gdjfm"] Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.752209 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.755094 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.755369 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.755609 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-jx97p" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.763278 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-gdjfm"] Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.803938 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-frvvc"] Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.807473 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.819911 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-frvvc"] Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.924036 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3f29e0f7-8556-4570-a115-1d1ee089479c-var-run\") pod \"ovn-controller-gdjfm\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.924086 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3f29e0f7-8556-4570-a115-1d1ee089479c-var-run-ovn\") pod \"ovn-controller-gdjfm\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.924110 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-var-run\") pod \"ovn-controller-ovs-frvvc\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.924126 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3f29e0f7-8556-4570-a115-1d1ee089479c-var-log-ovn\") pod \"ovn-controller-gdjfm\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.924148 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f29e0f7-8556-4570-a115-1d1ee089479c-ovn-controller-tls-certs\") pod \"ovn-controller-gdjfm\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:21 crc kubenswrapper[4751]: 
I0227 16:45:21.924251 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-var-lib\") pod \"ovn-controller-ovs-frvvc\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.924292 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrqct\" (UniqueName: \"kubernetes.io/projected/3f29e0f7-8556-4570-a115-1d1ee089479c-kube-api-access-mrqct\") pod \"ovn-controller-gdjfm\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.924331 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-var-log\") pod \"ovn-controller-ovs-frvvc\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.924475 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f29e0f7-8556-4570-a115-1d1ee089479c-scripts\") pod \"ovn-controller-gdjfm\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.924503 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-scripts\") pod \"ovn-controller-ovs-frvvc\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.924576 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-etc-ovs\") pod \"ovn-controller-ovs-frvvc\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.924659 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srfdq\" (UniqueName: \"kubernetes.io/projected/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-kube-api-access-srfdq\") pod \"ovn-controller-ovs-frvvc\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:45:21 crc kubenswrapper[4751]: I0227 16:45:21.924685 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f29e0f7-8556-4570-a115-1d1ee089479c-combined-ca-bundle\") pod \"ovn-controller-gdjfm\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.026578 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f29e0f7-8556-4570-a115-1d1ee089479c-ovn-controller-tls-certs\") pod \"ovn-controller-gdjfm\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.026648 
4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-var-lib\") pod \"ovn-controller-ovs-frvvc\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.027434 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-var-lib\") pod \"ovn-controller-ovs-frvvc\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.027582 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrqct\" (UniqueName: \"kubernetes.io/projected/3f29e0f7-8556-4570-a115-1d1ee089479c-kube-api-access-mrqct\") pod \"ovn-controller-gdjfm\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.027682 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-var-log\") pod \"ovn-controller-ovs-frvvc\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.027886 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f29e0f7-8556-4570-a115-1d1ee089479c-scripts\") pod \"ovn-controller-gdjfm\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.027938 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-scripts\") pod \"ovn-controller-ovs-frvvc\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.028025 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-etc-ovs\") pod \"ovn-controller-ovs-frvvc\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.028143 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srfdq\" (UniqueName: \"kubernetes.io/projected/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-kube-api-access-srfdq\") pod \"ovn-controller-ovs-frvvc\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.028175 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f29e0f7-8556-4570-a115-1d1ee089479c-combined-ca-bundle\") pod \"ovn-controller-gdjfm\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.028370 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3f29e0f7-8556-4570-a115-1d1ee089479c-var-run\") pod \"ovn-controller-gdjfm\" 
(UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.028449 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3f29e0f7-8556-4570-a115-1d1ee089479c-var-run-ovn\") pod \"ovn-controller-gdjfm\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.028619 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-etc-ovs\") pod \"ovn-controller-ovs-frvvc\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.028741 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-var-log\") pod \"ovn-controller-ovs-frvvc\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.028779 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3f29e0f7-8556-4570-a115-1d1ee089479c-var-log-ovn\") pod \"ovn-controller-gdjfm\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.028916 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3f29e0f7-8556-4570-a115-1d1ee089479c-var-run\") pod \"ovn-controller-gdjfm\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.028996 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3f29e0f7-8556-4570-a115-1d1ee089479c-var-run-ovn\") pod \"ovn-controller-gdjfm\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.029036 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-var-run\") pod \"ovn-controller-ovs-frvvc\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.029177 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-var-run\") pod \"ovn-controller-ovs-frvvc\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.029285 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3f29e0f7-8556-4570-a115-1d1ee089479c-var-log-ovn\") pod \"ovn-controller-gdjfm\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.031383 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-scripts\") pod \"ovn-controller-ovs-frvvc\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.031436 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f29e0f7-8556-4570-a115-1d1ee089479c-scripts\") pod \"ovn-controller-gdjfm\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.031768 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f29e0f7-8556-4570-a115-1d1ee089479c-ovn-controller-tls-certs\") pod \"ovn-controller-gdjfm\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.043593 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f29e0f7-8556-4570-a115-1d1ee089479c-combined-ca-bundle\") pod \"ovn-controller-gdjfm\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.047241 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srfdq\" (UniqueName: \"kubernetes.io/projected/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-kube-api-access-srfdq\") pod \"ovn-controller-ovs-frvvc\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.051104 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrqct\" (UniqueName: \"kubernetes.io/projected/3f29e0f7-8556-4570-a115-1d1ee089479c-kube-api-access-mrqct\") pod \"ovn-controller-gdjfm\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.082166 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:22 crc kubenswrapper[4751]: I0227 16:45:22.130993 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.132724 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.135660 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.140192 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.140203 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.140569 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.140803 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-q6l22" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.148175 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.281273 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/438372fd-dcc8-47e3-a547-c8a1729b2f1f-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.281384 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwntj\" (UniqueName: \"kubernetes.io/projected/438372fd-dcc8-47e3-a547-c8a1729b2f1f-kube-api-access-hwntj\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.281486 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/438372fd-dcc8-47e3-a547-c8a1729b2f1f-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.281518 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/438372fd-dcc8-47e3-a547-c8a1729b2f1f-config\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.281570 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/438372fd-dcc8-47e3-a547-c8a1729b2f1f-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.281655 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.281747 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/438372fd-dcc8-47e3-a547-c8a1729b2f1f-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: 
\"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.281911 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/438372fd-dcc8-47e3-a547-c8a1729b2f1f-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.383808 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/438372fd-dcc8-47e3-a547-c8a1729b2f1f-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.383906 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/438372fd-dcc8-47e3-a547-c8a1729b2f1f-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.384013 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/438372fd-dcc8-47e3-a547-c8a1729b2f1f-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.384367 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/438372fd-dcc8-47e3-a547-c8a1729b2f1f-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.384792 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwntj\" (UniqueName: \"kubernetes.io/projected/438372fd-dcc8-47e3-a547-c8a1729b2f1f-kube-api-access-hwntj\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.384906 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/438372fd-dcc8-47e3-a547-c8a1729b2f1f-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.385037 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/438372fd-dcc8-47e3-a547-c8a1729b2f1f-config\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.385156 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/438372fd-dcc8-47e3-a547-c8a1729b2f1f-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.385433 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" 
(UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.385844 4751 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.386167 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/438372fd-dcc8-47e3-a547-c8a1729b2f1f-config\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.386666 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/438372fd-dcc8-47e3-a547-c8a1729b2f1f-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.389235 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/438372fd-dcc8-47e3-a547-c8a1729b2f1f-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.391316 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/438372fd-dcc8-47e3-a547-c8a1729b2f1f-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.392629 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/438372fd-dcc8-47e3-a547-c8a1729b2f1f-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.401584 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwntj\" (UniqueName: \"kubernetes.io/projected/438372fd-dcc8-47e3-a547-c8a1729b2f1f-kube-api-access-hwntj\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.421995 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:24 crc kubenswrapper[4751]: I0227 16:45:24.493078 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:28 crc kubenswrapper[4751]: I0227 16:45:28.886945 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Feb 27 16:45:28 crc kubenswrapper[4751]: I0227 16:45:28.918468 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 16:45:28 crc kubenswrapper[4751]: I0227 16:45:28.918536 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:45:34 crc kubenswrapper[4751]: E0227 16:45:34.867354 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Feb 27 16:45:34 crc kubenswrapper[4751]: E0227 16:45:34.868077 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-p6l8q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-8jhlv_openstack(e505cef0-6aa5-4e35-ba8d-cb8797afd6e6): ErrImagePull: rpc 
error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 27 16:45:34 crc kubenswrapper[4751]: E0227 16:45:34.869227 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-8jhlv" podUID="e505cef0-6aa5-4e35-ba8d-cb8797afd6e6" Feb 27 16:45:34 crc kubenswrapper[4751]: E0227 16:45:34.919046 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Feb 27 16:45:34 crc kubenswrapper[4751]: E0227 16:45:34.919237 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rr5mr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-4lcvw_openstack(82f6fa3d-4fd7-422a-9a43-a27bf97ab447): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 27 16:45:34 crc kubenswrapper[4751]: E0227 16:45:34.920516 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-4lcvw" podUID="82f6fa3d-4fd7-422a-9a43-a27bf97ab447" Feb 27 16:45:34 crc kubenswrapper[4751]: E0227 16:45:34.965601 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" 
image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Feb 27 16:45:34 crc kubenswrapper[4751]: E0227 16:45:34.965816 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8ck27,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-ct7qm_openstack(6d791fba-abfa-4f84-af28-a75fa6596882): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 27 16:45:34 crc kubenswrapper[4751]: E0227 16:45:34.966977 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-ct7qm" podUID="6d791fba-abfa-4f84-af28-a75fa6596882" Feb 27 16:45:35 crc kubenswrapper[4751]: W0227 16:45:35.308575 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3c3834ac_6796_485b_9dec_e45cebf976df.slice/crio-8d4fa0205354e077863339c8fee33d79b4aa4af594cd5627463fc9a54d9eef37 WatchSource:0}: Error finding container 8d4fa0205354e077863339c8fee33d79b4aa4af594cd5627463fc9a54d9eef37: Status 404 returned error can't find the container with id 8d4fa0205354e077863339c8fee33d79b4aa4af594cd5627463fc9a54d9eef37 Feb 27 16:45:35 crc kubenswrapper[4751]: I0227 16:45:35.448724 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" 
event={"ID":"3c3834ac-6796-485b-9dec-e45cebf976df","Type":"ContainerStarted","Data":"8d4fa0205354e077863339c8fee33d79b4aa4af594cd5627463fc9a54d9eef37"} Feb 27 16:45:35 crc kubenswrapper[4751]: E0227 16:45:35.762510 4751 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Feb 27 16:45:35 crc kubenswrapper[4751]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/e505cef0-6aa5-4e35-ba8d-cb8797afd6e6/volume-subpaths/dns-svc/init/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Feb 27 16:45:35 crc kubenswrapper[4751]: > podSandboxID="6de54e6c71bbb4289ad37ca81ff2a5b0e74eadfa06d62e6581af8daa535ea63d" Feb 27 16:45:35 crc kubenswrapper[4751]: E0227 16:45:35.763174 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 16:45:35 crc kubenswrapper[4751]: init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-p6l8q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-8jhlv_openstack(e505cef0-6aa5-4e35-ba8d-cb8797afd6e6): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/e505cef0-6aa5-4e35-ba8d-cb8797afd6e6/volume-subpaths/dns-svc/init/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Feb 27 16:45:35 crc kubenswrapper[4751]: > logger="UnhandledError" Feb 27 16:45:35 crc kubenswrapper[4751]: E0227 16:45:35.764457 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with CreateContainerError: \"container create failed: mount 
`/var/lib/kubelet/pods/e505cef0-6aa5-4e35-ba8d-cb8797afd6e6/volume-subpaths/dns-svc/init/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-666b6646f7-8jhlv" podUID="e505cef0-6aa5-4e35-ba8d-cb8797afd6e6" Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.035130 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.067476 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-ct7qm" Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.088673 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Feb 27 16:45:36 crc kubenswrapper[4751]: W0227 16:45:36.108132 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4d395a15_ded3_4216_a09e_85b0305c2225.slice/crio-a4b4d5b0b6bb6127fab72bdb2c190cb9dffe1db9ce9611e03e378c561e013581 WatchSource:0}: Error finding container a4b4d5b0b6bb6127fab72bdb2c190cb9dffe1db9ce9611e03e378c561e013581: Status 404 returned error can't find the container with id a4b4d5b0b6bb6127fab72bdb2c190cb9dffe1db9ce9611e03e378c561e013581 Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.115252 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d791fba-abfa-4f84-af28-a75fa6596882-dns-svc\") pod \"6d791fba-abfa-4f84-af28-a75fa6596882\" (UID: \"6d791fba-abfa-4f84-af28-a75fa6596882\") " Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.116245 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d791fba-abfa-4f84-af28-a75fa6596882-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6d791fba-abfa-4f84-af28-a75fa6596882" (UID: "6d791fba-abfa-4f84-af28-a75fa6596882"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.124459 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d791fba-abfa-4f84-af28-a75fa6596882-config\") pod \"6d791fba-abfa-4f84-af28-a75fa6596882\" (UID: \"6d791fba-abfa-4f84-af28-a75fa6596882\") " Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.124547 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ck27\" (UniqueName: \"kubernetes.io/projected/6d791fba-abfa-4f84-af28-a75fa6596882-kube-api-access-8ck27\") pod \"6d791fba-abfa-4f84-af28-a75fa6596882\" (UID: \"6d791fba-abfa-4f84-af28-a75fa6596882\") " Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.125062 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d791fba-abfa-4f84-af28-a75fa6596882-config" (OuterVolumeSpecName: "config") pod "6d791fba-abfa-4f84-af28-a75fa6596882" (UID: "6d791fba-abfa-4f84-af28-a75fa6596882"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.125420 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d791fba-abfa-4f84-af28-a75fa6596882-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.125442 4751 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d791fba-abfa-4f84-af28-a75fa6596882-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.132023 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d791fba-abfa-4f84-af28-a75fa6596882-kube-api-access-8ck27" (OuterVolumeSpecName: "kube-api-access-8ck27") pod "6d791fba-abfa-4f84-af28-a75fa6596882" (UID: "6d791fba-abfa-4f84-af28-a75fa6596882"). InnerVolumeSpecName "kube-api-access-8ck27". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.178976 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-4lcvw" Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.193490 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-gdjfm"] Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.217503 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.226228 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rr5mr\" (UniqueName: \"kubernetes.io/projected/82f6fa3d-4fd7-422a-9a43-a27bf97ab447-kube-api-access-rr5mr\") pod \"82f6fa3d-4fd7-422a-9a43-a27bf97ab447\" (UID: \"82f6fa3d-4fd7-422a-9a43-a27bf97ab447\") " Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.226407 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82f6fa3d-4fd7-422a-9a43-a27bf97ab447-config\") pod \"82f6fa3d-4fd7-422a-9a43-a27bf97ab447\" (UID: \"82f6fa3d-4fd7-422a-9a43-a27bf97ab447\") " Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.226812 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8ck27\" (UniqueName: \"kubernetes.io/projected/6d791fba-abfa-4f84-af28-a75fa6596882-kube-api-access-8ck27\") on node \"crc\" DevicePath \"\"" Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.227160 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82f6fa3d-4fd7-422a-9a43-a27bf97ab447-config" (OuterVolumeSpecName: "config") pod "82f6fa3d-4fd7-422a-9a43-a27bf97ab447" (UID: "82f6fa3d-4fd7-422a-9a43-a27bf97ab447"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.230970 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82f6fa3d-4fd7-422a-9a43-a27bf97ab447-kube-api-access-rr5mr" (OuterVolumeSpecName: "kube-api-access-rr5mr") pod "82f6fa3d-4fd7-422a-9a43-a27bf97ab447" (UID: "82f6fa3d-4fd7-422a-9a43-a27bf97ab447"). InnerVolumeSpecName "kube-api-access-rr5mr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.256058 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-frvvc"] Feb 27 16:45:36 crc kubenswrapper[4751]: W0227 16:45:36.257065 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda888fc6d_a4cc_4bc8_bca1_dafdfed15274.slice/crio-03459f4abeec407bc2b87c5867eb9a54da4d26b721a60914c4b284d63078e548 WatchSource:0}: Error finding container 03459f4abeec407bc2b87c5867eb9a54da4d26b721a60914c4b284d63078e548: Status 404 returned error can't find the container with id 03459f4abeec407bc2b87c5867eb9a54da4d26b721a60914c4b284d63078e548 Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.308372 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Feb 27 16:45:36 crc kubenswrapper[4751]: W0227 16:45:36.310671 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95b4a6cb_a957_4a31_8510_292eb1305ad6.slice/crio-d735ddd6fc4ae0cc68c0180d55cfe240e0a1db24d8be54690ffa21863e8eb509 WatchSource:0}: Error finding container d735ddd6fc4ae0cc68c0180d55cfe240e0a1db24d8be54690ffa21863e8eb509: Status 404 returned error can't find the container with id d735ddd6fc4ae0cc68c0180d55cfe240e0a1db24d8be54690ffa21863e8eb509 Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.327979 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82f6fa3d-4fd7-422a-9a43-a27bf97ab447-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.328012 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rr5mr\" (UniqueName: \"kubernetes.io/projected/82f6fa3d-4fd7-422a-9a43-a27bf97ab447-kube-api-access-rr5mr\") on node \"crc\" DevicePath \"\"" Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.459470 4751 generic.go:334] "Generic (PLEG): container finished" podID="4dd2754d-ed93-4494-ae20-b38295ba9fff" containerID="544f50627f746ac00b0478377ae0f0bde6c9e30bdfd7fa475e9d51ee964262ad" exitCode=0 Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.459570 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-vpgr9" event={"ID":"4dd2754d-ed93-4494-ae20-b38295ba9fff","Type":"ContainerDied","Data":"544f50627f746ac00b0478377ae0f0bde6c9e30bdfd7fa475e9d51ee964262ad"} Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.461096 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-gdjfm" event={"ID":"3f29e0f7-8556-4570-a115-1d1ee089479c","Type":"ContainerStarted","Data":"1eb85dbb8f2524a4d5419fdd2893031b7e550048413c3dc29f369b1bd2104490"} Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.462379 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-4lcvw" event={"ID":"82f6fa3d-4fd7-422a-9a43-a27bf97ab447","Type":"ContainerDied","Data":"bfe08ce847ca4ecf3445a3de40629334f42e31334f949c5e4c8afd04616819b1"} Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.462473 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-4lcvw" Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.472592 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.474919 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"ad38c130-da58-4681-ac04-c017147fcc6e","Type":"ContainerStarted","Data":"3a9063174388779578239d1f785ca827abbb194ec885bfb9bc94b5ba17289a0a"} Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.483018 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"4d395a15-ded3-4216-a09e-85b0305c2225","Type":"ContainerStarted","Data":"a4b4d5b0b6bb6127fab72bdb2c190cb9dffe1db9ce9611e03e378c561e013581"} Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.484985 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"253a763c-21da-4224-91a2-e3bdc6eca0e9","Type":"ContainerStarted","Data":"1c6b87f8293f7739572020a0f496a0dba1b8cfc5e6e41dc3f1bac3f62bb80a2f"} Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.486963 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"95b4a6cb-a957-4a31-8510-292eb1305ad6","Type":"ContainerStarted","Data":"d735ddd6fc4ae0cc68c0180d55cfe240e0a1db24d8be54690ffa21863e8eb509"} Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.488104 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-ct7qm" event={"ID":"6d791fba-abfa-4f84-af28-a75fa6596882","Type":"ContainerDied","Data":"2f3ea58bb05476286732895a43b017ad4fc87846eed1a4bf39c5433ccad7d8d5"} Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.488174 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-ct7qm" Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.489498 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-frvvc" event={"ID":"a888fc6d-a4cc-4bc8-bca1-dafdfed15274","Type":"ContainerStarted","Data":"03459f4abeec407bc2b87c5867eb9a54da4d26b721a60914c4b284d63078e548"} Feb 27 16:45:36 crc kubenswrapper[4751]: W0227 16:45:36.516932 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod438372fd_dcc8_47e3_a547_c8a1729b2f1f.slice/crio-e1c16cb0b8b15b9a2587d6ad0f42f523773b2f00301aa390b5128119547643bf WatchSource:0}: Error finding container e1c16cb0b8b15b9a2587d6ad0f42f523773b2f00301aa390b5128119547643bf: Status 404 returned error can't find the container with id e1c16cb0b8b15b9a2587d6ad0f42f523773b2f00301aa390b5128119547643bf Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.563175 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4lcvw"] Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.571454 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4lcvw"] Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.642586 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-ct7qm"] Feb 27 16:45:36 crc kubenswrapper[4751]: I0227 16:45:36.680113 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-ct7qm"] Feb 27 16:45:37 crc kubenswrapper[4751]: I0227 16:45:37.510177 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-vpgr9" event={"ID":"4dd2754d-ed93-4494-ae20-b38295ba9fff","Type":"ContainerStarted","Data":"ab41f5ccb0a4cf0b5b23f112245d7c026e3757eb63d5c8a9f13f9a431eed3bad"} Feb 27 16:45:37 crc kubenswrapper[4751]: I0227 16:45:37.510664 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-vpgr9" Feb 27 16:45:37 crc kubenswrapper[4751]: I0227 16:45:37.514160 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"51a81c6a-6814-412d-b77d-e741f1f74446","Type":"ContainerStarted","Data":"4c6716148a74ea8af28ec00f8d9776e6a9149b4724fe5543af6b7a72f9411e92"} Feb 27 16:45:37 crc kubenswrapper[4751]: I0227 16:45:37.517692 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"cecf602c-dec2-40c6-922c-bf84b707b1b9","Type":"ContainerStarted","Data":"90d048165126f4b62e9010d52adea94a9bf9162b44553551cc7d28985890a0a2"} Feb 27 16:45:37 crc kubenswrapper[4751]: I0227 16:45:37.518927 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"438372fd-dcc8-47e3-a547-c8a1729b2f1f","Type":"ContainerStarted","Data":"e1c16cb0b8b15b9a2587d6ad0f42f523773b2f00301aa390b5128119547643bf"} Feb 27 16:45:37 crc kubenswrapper[4751]: I0227 16:45:37.531496 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-vpgr9" podStartSLOduration=3.511086336 podStartE2EDuration="27.531480592s" podCreationTimestamp="2026-02-27 16:45:10 +0000 UTC" firstStartedPulling="2026-02-27 16:45:11.393040347 +0000 UTC m=+1273.540054794" lastFinishedPulling="2026-02-27 16:45:35.413434603 +0000 UTC m=+1297.560449050" observedRunningTime="2026-02-27 16:45:37.527071534 +0000 UTC m=+1299.674085981" 
watchObservedRunningTime="2026-02-27 16:45:37.531480592 +0000 UTC m=+1299.678495039" Feb 27 16:45:38 crc kubenswrapper[4751]: I0227 16:45:38.533572 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d791fba-abfa-4f84-af28-a75fa6596882" path="/var/lib/kubelet/pods/6d791fba-abfa-4f84-af28-a75fa6596882/volumes" Feb 27 16:45:38 crc kubenswrapper[4751]: I0227 16:45:38.534502 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82f6fa3d-4fd7-422a-9a43-a27bf97ab447" path="/var/lib/kubelet/pods/82f6fa3d-4fd7-422a-9a43-a27bf97ab447/volumes" Feb 27 16:45:44 crc kubenswrapper[4751]: I0227 16:45:44.599472 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"3c3834ac-6796-485b-9dec-e45cebf976df","Type":"ContainerStarted","Data":"c70cb3e82423521f3790af75416c702817be4dee431d0d08dcd2396683feb66d"} Feb 27 16:45:44 crc kubenswrapper[4751]: I0227 16:45:44.600001 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Feb 27 16:45:44 crc kubenswrapper[4751]: I0227 16:45:44.601543 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"253a763c-21da-4224-91a2-e3bdc6eca0e9","Type":"ContainerStarted","Data":"a46f78c9165049b07436a4eb4889db7669b3edc8aafd3369db0aafc70fee6019"} Feb 27 16:45:44 crc kubenswrapper[4751]: I0227 16:45:44.603146 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"438372fd-dcc8-47e3-a547-c8a1729b2f1f","Type":"ContainerStarted","Data":"21f622d5f20191c0f88a55bea7b29298d2743cf401c56a29ef7e4407b0fd82c6"} Feb 27 16:45:44 crc kubenswrapper[4751]: I0227 16:45:44.606233 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"4d395a15-ded3-4216-a09e-85b0305c2225","Type":"ContainerStarted","Data":"6d76eae4f0a48089e17a193410c4eec54a030b572ca6ae036b07233bd4f4615a"} Feb 27 16:45:44 crc kubenswrapper[4751]: I0227 16:45:44.612370 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"ad38c130-da58-4681-ac04-c017147fcc6e","Type":"ContainerStarted","Data":"1112bf30f48aa7869fb6fecec4782ea6f38b6fa39e3073193c9725ad3ad85923"} Feb 27 16:45:44 crc kubenswrapper[4751]: I0227 16:45:44.612486 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Feb 27 16:45:44 crc kubenswrapper[4751]: I0227 16:45:44.615615 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-gdjfm" event={"ID":"3f29e0f7-8556-4570-a115-1d1ee089479c","Type":"ContainerStarted","Data":"5dcd2bdb2e6d0bb709a42589f714ed014817912f72ec631784e18f15031c3168"} Feb 27 16:45:44 crc kubenswrapper[4751]: I0227 16:45:44.616248 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-gdjfm" Feb 27 16:45:44 crc kubenswrapper[4751]: I0227 16:45:44.620235 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=23.387341382 podStartE2EDuration="30.620221143s" podCreationTimestamp="2026-02-27 16:45:14 +0000 UTC" firstStartedPulling="2026-02-27 16:45:35.334640738 +0000 UTC m=+1297.481655185" lastFinishedPulling="2026-02-27 16:45:42.567520459 +0000 UTC m=+1304.714534946" observedRunningTime="2026-02-27 16:45:44.616583095 +0000 UTC m=+1306.763597542" watchObservedRunningTime="2026-02-27 16:45:44.620221143 +0000 UTC m=+1306.767235590" Feb 27 16:45:44 crc 
kubenswrapper[4751]: I0227 16:45:44.621285 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"95b4a6cb-a957-4a31-8510-292eb1305ad6","Type":"ContainerStarted","Data":"67b68970dcc70c2551fa94757bc51fc1016792d1261833505238d86e1d89cc24"} Feb 27 16:45:44 crc kubenswrapper[4751]: I0227 16:45:44.623156 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-frvvc" event={"ID":"a888fc6d-a4cc-4bc8-bca1-dafdfed15274","Type":"ContainerStarted","Data":"03c85cfe5079bb6e88c47072171664dd3c1246e3e42c5acd0c4a7e2c76c2055c"} Feb 27 16:45:44 crc kubenswrapper[4751]: I0227 16:45:44.665297 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=20.77183327 podStartE2EDuration="28.665280916s" podCreationTimestamp="2026-02-27 16:45:16 +0000 UTC" firstStartedPulling="2026-02-27 16:45:36.235723069 +0000 UTC m=+1298.382737516" lastFinishedPulling="2026-02-27 16:45:44.129170715 +0000 UTC m=+1306.276185162" observedRunningTime="2026-02-27 16:45:44.660022226 +0000 UTC m=+1306.807036673" watchObservedRunningTime="2026-02-27 16:45:44.665280916 +0000 UTC m=+1306.812295363" Feb 27 16:45:44 crc kubenswrapper[4751]: I0227 16:45:44.683555 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-gdjfm" podStartSLOduration=16.39102463 podStartE2EDuration="23.683534834s" podCreationTimestamp="2026-02-27 16:45:21 +0000 UTC" firstStartedPulling="2026-02-27 16:45:36.20768054 +0000 UTC m=+1298.354694987" lastFinishedPulling="2026-02-27 16:45:43.500190744 +0000 UTC m=+1305.647205191" observedRunningTime="2026-02-27 16:45:44.680547984 +0000 UTC m=+1306.827562431" watchObservedRunningTime="2026-02-27 16:45:44.683534834 +0000 UTC m=+1306.830549271" Feb 27 16:45:45 crc kubenswrapper[4751]: I0227 16:45:45.631944 4751 generic.go:334] "Generic (PLEG): container finished" podID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerID="03c85cfe5079bb6e88c47072171664dd3c1246e3e42c5acd0c4a7e2c76c2055c" exitCode=0 Feb 27 16:45:45 crc kubenswrapper[4751]: I0227 16:45:45.632082 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-frvvc" event={"ID":"a888fc6d-a4cc-4bc8-bca1-dafdfed15274","Type":"ContainerDied","Data":"03c85cfe5079bb6e88c47072171664dd3c1246e3e42c5acd0c4a7e2c76c2055c"} Feb 27 16:45:45 crc kubenswrapper[4751]: I0227 16:45:45.976563 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57d769cc4f-vpgr9" Feb 27 16:45:46 crc kubenswrapper[4751]: I0227 16:45:46.040762 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-8jhlv"] Feb 27 16:45:46 crc kubenswrapper[4751]: I0227 16:45:46.422858 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-8jhlv" Feb 27 16:45:46 crc kubenswrapper[4751]: I0227 16:45:46.608015 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p6l8q\" (UniqueName: \"kubernetes.io/projected/e505cef0-6aa5-4e35-ba8d-cb8797afd6e6-kube-api-access-p6l8q\") pod \"e505cef0-6aa5-4e35-ba8d-cb8797afd6e6\" (UID: \"e505cef0-6aa5-4e35-ba8d-cb8797afd6e6\") " Feb 27 16:45:46 crc kubenswrapper[4751]: I0227 16:45:46.608108 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e505cef0-6aa5-4e35-ba8d-cb8797afd6e6-dns-svc\") pod \"e505cef0-6aa5-4e35-ba8d-cb8797afd6e6\" (UID: \"e505cef0-6aa5-4e35-ba8d-cb8797afd6e6\") " Feb 27 16:45:46 crc kubenswrapper[4751]: I0227 16:45:46.608148 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e505cef0-6aa5-4e35-ba8d-cb8797afd6e6-config\") pod \"e505cef0-6aa5-4e35-ba8d-cb8797afd6e6\" (UID: \"e505cef0-6aa5-4e35-ba8d-cb8797afd6e6\") " Feb 27 16:45:46 crc kubenswrapper[4751]: I0227 16:45:46.614670 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e505cef0-6aa5-4e35-ba8d-cb8797afd6e6-kube-api-access-p6l8q" (OuterVolumeSpecName: "kube-api-access-p6l8q") pod "e505cef0-6aa5-4e35-ba8d-cb8797afd6e6" (UID: "e505cef0-6aa5-4e35-ba8d-cb8797afd6e6"). InnerVolumeSpecName "kube-api-access-p6l8q". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:45:46 crc kubenswrapper[4751]: I0227 16:45:46.633859 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e505cef0-6aa5-4e35-ba8d-cb8797afd6e6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e505cef0-6aa5-4e35-ba8d-cb8797afd6e6" (UID: "e505cef0-6aa5-4e35-ba8d-cb8797afd6e6"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:45:46 crc kubenswrapper[4751]: I0227 16:45:46.648215 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e505cef0-6aa5-4e35-ba8d-cb8797afd6e6-config" (OuterVolumeSpecName: "config") pod "e505cef0-6aa5-4e35-ba8d-cb8797afd6e6" (UID: "e505cef0-6aa5-4e35-ba8d-cb8797afd6e6"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:45:46 crc kubenswrapper[4751]: I0227 16:45:46.674106 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-frvvc" event={"ID":"a888fc6d-a4cc-4bc8-bca1-dafdfed15274","Type":"ContainerStarted","Data":"3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9"} Feb 27 16:45:46 crc kubenswrapper[4751]: I0227 16:45:46.674157 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-frvvc" event={"ID":"a888fc6d-a4cc-4bc8-bca1-dafdfed15274","Type":"ContainerStarted","Data":"f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47"} Feb 27 16:45:46 crc kubenswrapper[4751]: I0227 16:45:46.674225 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:45:46 crc kubenswrapper[4751]: I0227 16:45:46.679355 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-8jhlv" event={"ID":"e505cef0-6aa5-4e35-ba8d-cb8797afd6e6","Type":"ContainerDied","Data":"6de54e6c71bbb4289ad37ca81ff2a5b0e74eadfa06d62e6581af8daa535ea63d"} Feb 27 16:45:46 crc kubenswrapper[4751]: I0227 16:45:46.679360 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-8jhlv" Feb 27 16:45:46 crc kubenswrapper[4751]: I0227 16:45:46.709872 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p6l8q\" (UniqueName: \"kubernetes.io/projected/e505cef0-6aa5-4e35-ba8d-cb8797afd6e6-kube-api-access-p6l8q\") on node \"crc\" DevicePath \"\"" Feb 27 16:45:46 crc kubenswrapper[4751]: I0227 16:45:46.709898 4751 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e505cef0-6aa5-4e35-ba8d-cb8797afd6e6-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 27 16:45:46 crc kubenswrapper[4751]: I0227 16:45:46.709907 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e505cef0-6aa5-4e35-ba8d-cb8797afd6e6-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:45:46 crc kubenswrapper[4751]: I0227 16:45:46.713343 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-frvvc" podStartSLOduration=18.762129449 podStartE2EDuration="25.713322836s" podCreationTimestamp="2026-02-27 16:45:21 +0000 UTC" firstStartedPulling="2026-02-27 16:45:36.260302525 +0000 UTC m=+1298.407316972" lastFinishedPulling="2026-02-27 16:45:43.211495922 +0000 UTC m=+1305.358510359" observedRunningTime="2026-02-27 16:45:46.701654294 +0000 UTC m=+1308.848668741" watchObservedRunningTime="2026-02-27 16:45:46.713322836 +0000 UTC m=+1308.860337283" Feb 27 16:45:46 crc kubenswrapper[4751]: I0227 16:45:46.766254 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-8jhlv"] Feb 27 16:45:46 crc kubenswrapper[4751]: I0227 16:45:46.780256 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-8jhlv"] Feb 27 16:45:47 crc kubenswrapper[4751]: I0227 16:45:47.131887 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:45:48 crc kubenswrapper[4751]: I0227 16:45:48.528918 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e505cef0-6aa5-4e35-ba8d-cb8797afd6e6" path="/var/lib/kubelet/pods/e505cef0-6aa5-4e35-ba8d-cb8797afd6e6/volumes" Feb 27 16:45:48 crc 
kubenswrapper[4751]: I0227 16:45:48.700598 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"438372fd-dcc8-47e3-a547-c8a1729b2f1f","Type":"ContainerStarted","Data":"6c74828590d469e03165fd6f252422867fd14e665dbe3ddfb7a6c1b2f1561bb7"} Feb 27 16:45:48 crc kubenswrapper[4751]: I0227 16:45:48.702734 4751 generic.go:334] "Generic (PLEG): container finished" podID="4d395a15-ded3-4216-a09e-85b0305c2225" containerID="6d76eae4f0a48089e17a193410c4eec54a030b572ca6ae036b07233bd4f4615a" exitCode=0 Feb 27 16:45:48 crc kubenswrapper[4751]: I0227 16:45:48.702804 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"4d395a15-ded3-4216-a09e-85b0305c2225","Type":"ContainerDied","Data":"6d76eae4f0a48089e17a193410c4eec54a030b572ca6ae036b07233bd4f4615a"} Feb 27 16:45:48 crc kubenswrapper[4751]: I0227 16:45:48.705433 4751 generic.go:334] "Generic (PLEG): container finished" podID="253a763c-21da-4224-91a2-e3bdc6eca0e9" containerID="a46f78c9165049b07436a4eb4889db7669b3edc8aafd3369db0aafc70fee6019" exitCode=0 Feb 27 16:45:48 crc kubenswrapper[4751]: I0227 16:45:48.705508 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"253a763c-21da-4224-91a2-e3bdc6eca0e9","Type":"ContainerDied","Data":"a46f78c9165049b07436a4eb4889db7669b3edc8aafd3369db0aafc70fee6019"} Feb 27 16:45:48 crc kubenswrapper[4751]: I0227 16:45:48.710508 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"95b4a6cb-a957-4a31-8510-292eb1305ad6","Type":"ContainerStarted","Data":"5f0141511ca3d3aa75b1878aa729a5715a4ce124f70bcdf3e79e44f61c356a32"} Feb 27 16:45:48 crc kubenswrapper[4751]: I0227 16:45:48.728467 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=13.915696013 podStartE2EDuration="25.728444866s" podCreationTimestamp="2026-02-27 16:45:23 +0000 UTC" firstStartedPulling="2026-02-27 16:45:36.518881022 +0000 UTC m=+1298.665895469" lastFinishedPulling="2026-02-27 16:45:48.331629875 +0000 UTC m=+1310.478644322" observedRunningTime="2026-02-27 16:45:48.726846423 +0000 UTC m=+1310.873860870" watchObservedRunningTime="2026-02-27 16:45:48.728444866 +0000 UTC m=+1310.875459353" Feb 27 16:45:48 crc kubenswrapper[4751]: I0227 16:45:48.802186 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=17.793641272 podStartE2EDuration="29.802147404s" podCreationTimestamp="2026-02-27 16:45:19 +0000 UTC" firstStartedPulling="2026-02-27 16:45:36.31253022 +0000 UTC m=+1298.459544667" lastFinishedPulling="2026-02-27 16:45:48.321036332 +0000 UTC m=+1310.468050799" observedRunningTime="2026-02-27 16:45:48.800641724 +0000 UTC m=+1310.947656171" watchObservedRunningTime="2026-02-27 16:45:48.802147404 +0000 UTC m=+1310.949161851" Feb 27 16:45:49 crc kubenswrapper[4751]: I0227 16:45:49.493237 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:49 crc kubenswrapper[4751]: I0227 16:45:49.723922 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"4d395a15-ded3-4216-a09e-85b0305c2225","Type":"ContainerStarted","Data":"70a38562ca2c9a0c4ec9524467e090e29e28ad4754513ec3db92df66b24fd0e5"} Feb 27 16:45:49 crc kubenswrapper[4751]: I0227 16:45:49.728158 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/openstack-galera-0" event={"ID":"253a763c-21da-4224-91a2-e3bdc6eca0e9","Type":"ContainerStarted","Data":"48b97b774a06d9dc312d0707fa7310a967a301a1c35ea672aebd0b0cb4aa8329"} Feb 27 16:45:49 crc kubenswrapper[4751]: I0227 16:45:49.764104 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=29.556418673 podStartE2EDuration="36.764082311s" podCreationTimestamp="2026-02-27 16:45:13 +0000 UTC" firstStartedPulling="2026-02-27 16:45:36.111234893 +0000 UTC m=+1298.258249340" lastFinishedPulling="2026-02-27 16:45:43.318898521 +0000 UTC m=+1305.465912978" observedRunningTime="2026-02-27 16:45:49.758147512 +0000 UTC m=+1311.905161999" watchObservedRunningTime="2026-02-27 16:45:49.764082311 +0000 UTC m=+1311.911096768" Feb 27 16:45:50 crc kubenswrapper[4751]: I0227 16:45:50.296653 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Feb 27 16:45:50 crc kubenswrapper[4751]: I0227 16:45:50.336718 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=30.385591051 podStartE2EDuration="38.336682667s" podCreationTimestamp="2026-02-27 16:45:12 +0000 UTC" firstStartedPulling="2026-02-27 16:45:36.05158585 +0000 UTC m=+1298.198600297" lastFinishedPulling="2026-02-27 16:45:44.002677466 +0000 UTC m=+1306.149691913" observedRunningTime="2026-02-27 16:45:49.794809342 +0000 UTC m=+1311.941823799" watchObservedRunningTime="2026-02-27 16:45:50.336682667 +0000 UTC m=+1312.483697154" Feb 27 16:45:51 crc kubenswrapper[4751]: I0227 16:45:51.093287 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:51 crc kubenswrapper[4751]: I0227 16:45:51.093723 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:51 crc kubenswrapper[4751]: I0227 16:45:51.128997 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:51 crc kubenswrapper[4751]: I0227 16:45:51.494155 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:51 crc kubenswrapper[4751]: I0227 16:45:51.556479 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:51 crc kubenswrapper[4751]: I0227 16:45:51.779106 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Feb 27 16:45:51 crc kubenswrapper[4751]: I0227 16:45:51.787915 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.032859 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-6ftj2"] Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.034307 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-6ftj2" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.036394 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.043566 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-6ftj2"] Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.093629 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-xcsrx"] Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.094961 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-xcsrx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.096943 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.105475 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09570fa6-55f0-442b-837c-b1c9b591de83-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-6ftj2\" (UID: \"09570fa6-55f0-442b-837c-b1c9b591de83\") " pod="openstack/dnsmasq-dns-7f896c8c65-6ftj2" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.105552 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bc5q\" (UniqueName: \"kubernetes.io/projected/09570fa6-55f0-442b-837c-b1c9b591de83-kube-api-access-7bc5q\") pod \"dnsmasq-dns-7f896c8c65-6ftj2\" (UID: \"09570fa6-55f0-442b-837c-b1c9b591de83\") " pod="openstack/dnsmasq-dns-7f896c8c65-6ftj2" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.105591 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09570fa6-55f0-442b-837c-b1c9b591de83-config\") pod \"dnsmasq-dns-7f896c8c65-6ftj2\" (UID: \"09570fa6-55f0-442b-837c-b1c9b591de83\") " pod="openstack/dnsmasq-dns-7f896c8c65-6ftj2" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.105660 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09570fa6-55f0-442b-837c-b1c9b591de83-dns-svc\") pod \"dnsmasq-dns-7f896c8c65-6ftj2\" (UID: \"09570fa6-55f0-442b-837c-b1c9b591de83\") " pod="openstack/dnsmasq-dns-7f896c8c65-6ftj2" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.110621 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-xcsrx"] Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.187878 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-6ftj2"] Feb 27 16:45:52 crc kubenswrapper[4751]: E0227 16:45:52.188487 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config dns-svc kube-api-access-7bc5q ovsdbserver-sb], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-7f896c8c65-6ftj2" podUID="09570fa6-55f0-442b-837c-b1c9b591de83" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.211330 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbjws\" (UniqueName: \"kubernetes.io/projected/da8c688d-4446-4f25-853d-0f694094d0af-kube-api-access-rbjws\") pod \"ovn-controller-metrics-xcsrx\" (UID: 
\"da8c688d-4446-4f25-853d-0f694094d0af\") " pod="openstack/ovn-controller-metrics-xcsrx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.212013 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/da8c688d-4446-4f25-853d-0f694094d0af-ovs-rundir\") pod \"ovn-controller-metrics-xcsrx\" (UID: \"da8c688d-4446-4f25-853d-0f694094d0af\") " pod="openstack/ovn-controller-metrics-xcsrx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.212059 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da8c688d-4446-4f25-853d-0f694094d0af-config\") pod \"ovn-controller-metrics-xcsrx\" (UID: \"da8c688d-4446-4f25-853d-0f694094d0af\") " pod="openstack/ovn-controller-metrics-xcsrx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.212094 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09570fa6-55f0-442b-837c-b1c9b591de83-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-6ftj2\" (UID: \"09570fa6-55f0-442b-837c-b1c9b591de83\") " pod="openstack/dnsmasq-dns-7f896c8c65-6ftj2" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.212151 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bc5q\" (UniqueName: \"kubernetes.io/projected/09570fa6-55f0-442b-837c-b1c9b591de83-kube-api-access-7bc5q\") pod \"dnsmasq-dns-7f896c8c65-6ftj2\" (UID: \"09570fa6-55f0-442b-837c-b1c9b591de83\") " pod="openstack/dnsmasq-dns-7f896c8c65-6ftj2" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.212199 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09570fa6-55f0-442b-837c-b1c9b591de83-config\") pod \"dnsmasq-dns-7f896c8c65-6ftj2\" (UID: \"09570fa6-55f0-442b-837c-b1c9b591de83\") " pod="openstack/dnsmasq-dns-7f896c8c65-6ftj2" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.212280 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/da8c688d-4446-4f25-853d-0f694094d0af-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-xcsrx\" (UID: \"da8c688d-4446-4f25-853d-0f694094d0af\") " pod="openstack/ovn-controller-metrics-xcsrx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.212318 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/da8c688d-4446-4f25-853d-0f694094d0af-ovn-rundir\") pod \"ovn-controller-metrics-xcsrx\" (UID: \"da8c688d-4446-4f25-853d-0f694094d0af\") " pod="openstack/ovn-controller-metrics-xcsrx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.212363 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da8c688d-4446-4f25-853d-0f694094d0af-combined-ca-bundle\") pod \"ovn-controller-metrics-xcsrx\" (UID: \"da8c688d-4446-4f25-853d-0f694094d0af\") " pod="openstack/ovn-controller-metrics-xcsrx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.212393 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09570fa6-55f0-442b-837c-b1c9b591de83-dns-svc\") pod 
\"dnsmasq-dns-7f896c8c65-6ftj2\" (UID: \"09570fa6-55f0-442b-837c-b1c9b591de83\") " pod="openstack/dnsmasq-dns-7f896c8c65-6ftj2" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.213337 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09570fa6-55f0-442b-837c-b1c9b591de83-dns-svc\") pod \"dnsmasq-dns-7f896c8c65-6ftj2\" (UID: \"09570fa6-55f0-442b-837c-b1c9b591de83\") " pod="openstack/dnsmasq-dns-7f896c8c65-6ftj2" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.213456 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09570fa6-55f0-442b-837c-b1c9b591de83-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-6ftj2\" (UID: \"09570fa6-55f0-442b-837c-b1c9b591de83\") " pod="openstack/dnsmasq-dns-7f896c8c65-6ftj2" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.213881 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09570fa6-55f0-442b-837c-b1c9b591de83-config\") pod \"dnsmasq-dns-7f896c8c65-6ftj2\" (UID: \"09570fa6-55f0-442b-837c-b1c9b591de83\") " pod="openstack/dnsmasq-dns-7f896c8c65-6ftj2" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.223578 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-cjpbx"] Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.225640 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.229242 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.235102 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.236731 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.245659 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.245925 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.246047 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.246226 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-7mtzm" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.247029 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bc5q\" (UniqueName: \"kubernetes.io/projected/09570fa6-55f0-442b-837c-b1c9b591de83-kube-api-access-7bc5q\") pod \"dnsmasq-dns-7f896c8c65-6ftj2\" (UID: \"09570fa6-55f0-442b-837c-b1c9b591de83\") " pod="openstack/dnsmasq-dns-7f896c8c65-6ftj2" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.249566 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-cjpbx"] Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.295587 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.313502 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/36495e7a-b8f8-4d54-a504-e92bb6211327-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " pod="openstack/ovn-northd-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.313558 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbjws\" (UniqueName: \"kubernetes.io/projected/da8c688d-4446-4f25-853d-0f694094d0af-kube-api-access-rbjws\") pod \"ovn-controller-metrics-xcsrx\" (UID: \"da8c688d-4446-4f25-853d-0f694094d0af\") " pod="openstack/ovn-controller-metrics-xcsrx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.313585 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/36495e7a-b8f8-4d54-a504-e92bb6211327-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " pod="openstack/ovn-northd-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.313604 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/da8c688d-4446-4f25-853d-0f694094d0af-ovs-rundir\") pod \"ovn-controller-metrics-xcsrx\" (UID: \"da8c688d-4446-4f25-853d-0f694094d0af\") " pod="openstack/ovn-controller-metrics-xcsrx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.313628 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da8c688d-4446-4f25-853d-0f694094d0af-config\") pod \"ovn-controller-metrics-xcsrx\" (UID: \"da8c688d-4446-4f25-853d-0f694094d0af\") " pod="openstack/ovn-controller-metrics-xcsrx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.313671 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" 
(UniqueName: \"kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-cjpbx\" (UID: \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\") " pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.313691 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-cjpbx\" (UID: \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\") " pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.313705 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54qj2\" (UniqueName: \"kubernetes.io/projected/09e809dd-dac1-46e4-a554-938e8eb7ccd5-kube-api-access-54qj2\") pod \"dnsmasq-dns-86db49b7ff-cjpbx\" (UID: \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\") " pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.313727 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/36495e7a-b8f8-4d54-a504-e92bb6211327-scripts\") pod \"ovn-northd-0\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " pod="openstack/ovn-northd-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.313747 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-config\") pod \"dnsmasq-dns-86db49b7ff-cjpbx\" (UID: \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\") " pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.313765 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-cjpbx\" (UID: \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\") " pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.313788 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/da8c688d-4446-4f25-853d-0f694094d0af-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-xcsrx\" (UID: \"da8c688d-4446-4f25-853d-0f694094d0af\") " pod="openstack/ovn-controller-metrics-xcsrx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.313803 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36495e7a-b8f8-4d54-a504-e92bb6211327-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " pod="openstack/ovn-northd-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.313820 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36495e7a-b8f8-4d54-a504-e92bb6211327-config\") pod \"ovn-northd-0\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " pod="openstack/ovn-northd-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.313839 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: 
\"kubernetes.io/host-path/da8c688d-4446-4f25-853d-0f694094d0af-ovn-rundir\") pod \"ovn-controller-metrics-xcsrx\" (UID: \"da8c688d-4446-4f25-853d-0f694094d0af\") " pod="openstack/ovn-controller-metrics-xcsrx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.313857 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58748\" (UniqueName: \"kubernetes.io/projected/36495e7a-b8f8-4d54-a504-e92bb6211327-kube-api-access-58748\") pod \"ovn-northd-0\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " pod="openstack/ovn-northd-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.313881 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da8c688d-4446-4f25-853d-0f694094d0af-combined-ca-bundle\") pod \"ovn-controller-metrics-xcsrx\" (UID: \"da8c688d-4446-4f25-853d-0f694094d0af\") " pod="openstack/ovn-controller-metrics-xcsrx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.313901 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/36495e7a-b8f8-4d54-a504-e92bb6211327-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " pod="openstack/ovn-northd-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.315822 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/da8c688d-4446-4f25-853d-0f694094d0af-ovn-rundir\") pod \"ovn-controller-metrics-xcsrx\" (UID: \"da8c688d-4446-4f25-853d-0f694094d0af\") " pod="openstack/ovn-controller-metrics-xcsrx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.316224 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/da8c688d-4446-4f25-853d-0f694094d0af-ovs-rundir\") pod \"ovn-controller-metrics-xcsrx\" (UID: \"da8c688d-4446-4f25-853d-0f694094d0af\") " pod="openstack/ovn-controller-metrics-xcsrx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.319924 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da8c688d-4446-4f25-853d-0f694094d0af-config\") pod \"ovn-controller-metrics-xcsrx\" (UID: \"da8c688d-4446-4f25-853d-0f694094d0af\") " pod="openstack/ovn-controller-metrics-xcsrx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.320533 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/da8c688d-4446-4f25-853d-0f694094d0af-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-xcsrx\" (UID: \"da8c688d-4446-4f25-853d-0f694094d0af\") " pod="openstack/ovn-controller-metrics-xcsrx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.323913 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da8c688d-4446-4f25-853d-0f694094d0af-combined-ca-bundle\") pod \"ovn-controller-metrics-xcsrx\" (UID: \"da8c688d-4446-4f25-853d-0f694094d0af\") " pod="openstack/ovn-controller-metrics-xcsrx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.328510 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbjws\" (UniqueName: \"kubernetes.io/projected/da8c688d-4446-4f25-853d-0f694094d0af-kube-api-access-rbjws\") pod \"ovn-controller-metrics-xcsrx\" 
(UID: \"da8c688d-4446-4f25-853d-0f694094d0af\") " pod="openstack/ovn-controller-metrics-xcsrx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.415503 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-cjpbx\" (UID: \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\") " pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.415550 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-cjpbx\" (UID: \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\") " pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.415570 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54qj2\" (UniqueName: \"kubernetes.io/projected/09e809dd-dac1-46e4-a554-938e8eb7ccd5-kube-api-access-54qj2\") pod \"dnsmasq-dns-86db49b7ff-cjpbx\" (UID: \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\") " pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.415595 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/36495e7a-b8f8-4d54-a504-e92bb6211327-scripts\") pod \"ovn-northd-0\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " pod="openstack/ovn-northd-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.415616 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-config\") pod \"dnsmasq-dns-86db49b7ff-cjpbx\" (UID: \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\") " pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.415634 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-cjpbx\" (UID: \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\") " pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.415658 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36495e7a-b8f8-4d54-a504-e92bb6211327-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " pod="openstack/ovn-northd-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.415674 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36495e7a-b8f8-4d54-a504-e92bb6211327-config\") pod \"ovn-northd-0\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " pod="openstack/ovn-northd-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.415697 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58748\" (UniqueName: \"kubernetes.io/projected/36495e7a-b8f8-4d54-a504-e92bb6211327-kube-api-access-58748\") pod \"ovn-northd-0\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " pod="openstack/ovn-northd-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.415721 4751 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/36495e7a-b8f8-4d54-a504-e92bb6211327-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " pod="openstack/ovn-northd-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.415752 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/36495e7a-b8f8-4d54-a504-e92bb6211327-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " pod="openstack/ovn-northd-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.415780 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/36495e7a-b8f8-4d54-a504-e92bb6211327-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " pod="openstack/ovn-northd-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.417030 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-cjpbx\" (UID: \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\") " pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.417187 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-xcsrx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.417266 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/36495e7a-b8f8-4d54-a504-e92bb6211327-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " pod="openstack/ovn-northd-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.418772 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/36495e7a-b8f8-4d54-a504-e92bb6211327-scripts\") pod \"ovn-northd-0\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " pod="openstack/ovn-northd-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.419210 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-config\") pod \"dnsmasq-dns-86db49b7ff-cjpbx\" (UID: \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\") " pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.419231 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-cjpbx\" (UID: \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\") " pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.419394 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-cjpbx\" (UID: \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\") " pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.420621 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/36495e7a-b8f8-4d54-a504-e92bb6211327-config\") pod \"ovn-northd-0\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " pod="openstack/ovn-northd-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.421041 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/36495e7a-b8f8-4d54-a504-e92bb6211327-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " pod="openstack/ovn-northd-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.433058 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/36495e7a-b8f8-4d54-a504-e92bb6211327-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " pod="openstack/ovn-northd-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.433256 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36495e7a-b8f8-4d54-a504-e92bb6211327-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " pod="openstack/ovn-northd-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.436518 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54qj2\" (UniqueName: \"kubernetes.io/projected/09e809dd-dac1-46e4-a554-938e8eb7ccd5-kube-api-access-54qj2\") pod \"dnsmasq-dns-86db49b7ff-cjpbx\" (UID: \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\") " pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.440227 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58748\" (UniqueName: \"kubernetes.io/projected/36495e7a-b8f8-4d54-a504-e92bb6211327-kube-api-access-58748\") pod \"ovn-northd-0\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " pod="openstack/ovn-northd-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.594257 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.602079 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.752492 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-6ftj2" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.765248 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-6ftj2" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.824844 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09570fa6-55f0-442b-837c-b1c9b591de83-dns-svc\") pod \"09570fa6-55f0-442b-837c-b1c9b591de83\" (UID: \"09570fa6-55f0-442b-837c-b1c9b591de83\") " Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.824951 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09570fa6-55f0-442b-837c-b1c9b591de83-ovsdbserver-sb\") pod \"09570fa6-55f0-442b-837c-b1c9b591de83\" (UID: \"09570fa6-55f0-442b-837c-b1c9b591de83\") " Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.825031 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7bc5q\" (UniqueName: \"kubernetes.io/projected/09570fa6-55f0-442b-837c-b1c9b591de83-kube-api-access-7bc5q\") pod \"09570fa6-55f0-442b-837c-b1c9b591de83\" (UID: \"09570fa6-55f0-442b-837c-b1c9b591de83\") " Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.825118 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09570fa6-55f0-442b-837c-b1c9b591de83-config\") pod \"09570fa6-55f0-442b-837c-b1c9b591de83\" (UID: \"09570fa6-55f0-442b-837c-b1c9b591de83\") " Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.826752 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09570fa6-55f0-442b-837c-b1c9b591de83-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "09570fa6-55f0-442b-837c-b1c9b591de83" (UID: "09570fa6-55f0-442b-837c-b1c9b591de83"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.827332 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09570fa6-55f0-442b-837c-b1c9b591de83-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "09570fa6-55f0-442b-837c-b1c9b591de83" (UID: "09570fa6-55f0-442b-837c-b1c9b591de83"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.827567 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09570fa6-55f0-442b-837c-b1c9b591de83-config" (OuterVolumeSpecName: "config") pod "09570fa6-55f0-442b-837c-b1c9b591de83" (UID: "09570fa6-55f0-442b-837c-b1c9b591de83"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.831032 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09570fa6-55f0-442b-837c-b1c9b591de83-kube-api-access-7bc5q" (OuterVolumeSpecName: "kube-api-access-7bc5q") pod "09570fa6-55f0-442b-837c-b1c9b591de83" (UID: "09570fa6-55f0-442b-837c-b1c9b591de83"). InnerVolumeSpecName "kube-api-access-7bc5q". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.848689 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-xcsrx"] Feb 27 16:45:52 crc kubenswrapper[4751]: W0227 16:45:52.862820 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podda8c688d_4446_4f25_853d_0f694094d0af.slice/crio-bbdea870242c38adabac7f852cce023aaf80328526d522bae0e76cf888724c8a WatchSource:0}: Error finding container bbdea870242c38adabac7f852cce023aaf80328526d522bae0e76cf888724c8a: Status 404 returned error can't find the container with id bbdea870242c38adabac7f852cce023aaf80328526d522bae0e76cf888724c8a Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.928828 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7bc5q\" (UniqueName: \"kubernetes.io/projected/09570fa6-55f0-442b-837c-b1c9b591de83-kube-api-access-7bc5q\") on node \"crc\" DevicePath \"\"" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.928885 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09570fa6-55f0-442b-837c-b1c9b591de83-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.928898 4751 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09570fa6-55f0-442b-837c-b1c9b591de83-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 27 16:45:52 crc kubenswrapper[4751]: I0227 16:45:52.928918 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09570fa6-55f0-442b-837c-b1c9b591de83-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 27 16:45:53 crc kubenswrapper[4751]: W0227 16:45:53.062902 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09e809dd_dac1_46e4_a554_938e8eb7ccd5.slice/crio-332c07fa125dcc1ba669fed5d2c6d9f2e596947bacd72db63e4c67e1bfe9a653 WatchSource:0}: Error finding container 332c07fa125dcc1ba669fed5d2c6d9f2e596947bacd72db63e4c67e1bfe9a653: Status 404 returned error can't find the container with id 332c07fa125dcc1ba669fed5d2c6d9f2e596947bacd72db63e4c67e1bfe9a653 Feb 27 16:45:53 crc kubenswrapper[4751]: I0227 16:45:53.064890 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-cjpbx"] Feb 27 16:45:53 crc kubenswrapper[4751]: I0227 16:45:53.126188 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Feb 27 16:45:53 crc kubenswrapper[4751]: I0227 16:45:53.379738 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Feb 27 16:45:53 crc kubenswrapper[4751]: I0227 16:45:53.381640 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Feb 27 16:45:53 crc kubenswrapper[4751]: I0227 16:45:53.760742 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"36495e7a-b8f8-4d54-a504-e92bb6211327","Type":"ContainerStarted","Data":"4f81e2702cb5555abb4b0fcf13911e5fc069527acbd24c2bf2ca02167e55e1c3"} Feb 27 16:45:53 crc kubenswrapper[4751]: I0227 16:45:53.762330 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-xcsrx" 
event={"ID":"da8c688d-4446-4f25-853d-0f694094d0af","Type":"ContainerStarted","Data":"d795f9fa74378811b1eaa8d00254a0ee94069992318ba57c1f1494363a208bf8"} Feb 27 16:45:53 crc kubenswrapper[4751]: I0227 16:45:53.762365 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-xcsrx" event={"ID":"da8c688d-4446-4f25-853d-0f694094d0af","Type":"ContainerStarted","Data":"bbdea870242c38adabac7f852cce023aaf80328526d522bae0e76cf888724c8a"} Feb 27 16:45:53 crc kubenswrapper[4751]: I0227 16:45:53.763903 4751 generic.go:334] "Generic (PLEG): container finished" podID="09e809dd-dac1-46e4-a554-938e8eb7ccd5" containerID="7be5b397b4f24ae2c1a739e69575924ac569d695680d757b4df2a85168f8c95c" exitCode=0 Feb 27 16:45:53 crc kubenswrapper[4751]: I0227 16:45:53.764224 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" event={"ID":"09e809dd-dac1-46e4-a554-938e8eb7ccd5","Type":"ContainerDied","Data":"7be5b397b4f24ae2c1a739e69575924ac569d695680d757b4df2a85168f8c95c"} Feb 27 16:45:53 crc kubenswrapper[4751]: I0227 16:45:53.764249 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" event={"ID":"09e809dd-dac1-46e4-a554-938e8eb7ccd5","Type":"ContainerStarted","Data":"332c07fa125dcc1ba669fed5d2c6d9f2e596947bacd72db63e4c67e1bfe9a653"} Feb 27 16:45:53 crc kubenswrapper[4751]: I0227 16:45:53.764928 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-6ftj2" Feb 27 16:45:53 crc kubenswrapper[4751]: I0227 16:45:53.832923 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-xcsrx" podStartSLOduration=1.832901371 podStartE2EDuration="1.832901371s" podCreationTimestamp="2026-02-27 16:45:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:45:53.789186603 +0000 UTC m=+1315.936201100" watchObservedRunningTime="2026-02-27 16:45:53.832901371 +0000 UTC m=+1315.979915838" Feb 27 16:45:53 crc kubenswrapper[4751]: I0227 16:45:53.981953 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-6ftj2"] Feb 27 16:45:53 crc kubenswrapper[4751]: I0227 16:45:53.989185 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-6ftj2"] Feb 27 16:45:54 crc kubenswrapper[4751]: I0227 16:45:54.530334 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09570fa6-55f0-442b-837c-b1c9b591de83" path="/var/lib/kubelet/pods/09570fa6-55f0-442b-837c-b1c9b591de83/volumes" Feb 27 16:45:54 crc kubenswrapper[4751]: I0227 16:45:54.776614 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" event={"ID":"09e809dd-dac1-46e4-a554-938e8eb7ccd5","Type":"ContainerStarted","Data":"7fcbcf56afa73c01200f92f75881c3d35ddcd67ebc19031407fb703a94a25735"} Feb 27 16:45:54 crc kubenswrapper[4751]: I0227 16:45:54.797806 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:54 crc kubenswrapper[4751]: I0227 16:45:54.799051 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:54 crc kubenswrapper[4751]: I0227 16:45:54.803490 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" 
podStartSLOduration=2.803472197 podStartE2EDuration="2.803472197s" podCreationTimestamp="2026-02-27 16:45:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:45:54.795763841 +0000 UTC m=+1316.942778288" watchObservedRunningTime="2026-02-27 16:45:54.803472197 +0000 UTC m=+1316.950486644" Feb 27 16:45:54 crc kubenswrapper[4751]: I0227 16:45:54.879135 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:55 crc kubenswrapper[4751]: I0227 16:45:55.786459 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" Feb 27 16:45:55 crc kubenswrapper[4751]: I0227 16:45:55.883676 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Feb 27 16:45:56 crc kubenswrapper[4751]: I0227 16:45:56.368239 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Feb 27 16:45:56 crc kubenswrapper[4751]: I0227 16:45:56.441625 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Feb 27 16:45:56 crc kubenswrapper[4751]: I0227 16:45:56.796707 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"36495e7a-b8f8-4d54-a504-e92bb6211327","Type":"ContainerStarted","Data":"8f37b9a53b57fd59b8d193823dd9bac3b95253b3c09ec6d44395ab006d4399e8"} Feb 27 16:45:56 crc kubenswrapper[4751]: I0227 16:45:56.796757 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"36495e7a-b8f8-4d54-a504-e92bb6211327","Type":"ContainerStarted","Data":"f077319db94e719684cff2b1abac38bddd05de9e2a8257b1d62586df2368fb1d"} Feb 27 16:45:56 crc kubenswrapper[4751]: I0227 16:45:56.819281 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=1.968960155 podStartE2EDuration="4.819263245s" podCreationTimestamp="2026-02-27 16:45:52 +0000 UTC" firstStartedPulling="2026-02-27 16:45:53.131166845 +0000 UTC m=+1315.278181292" lastFinishedPulling="2026-02-27 16:45:55.981469905 +0000 UTC m=+1318.128484382" observedRunningTime="2026-02-27 16:45:56.81720524 +0000 UTC m=+1318.964219697" watchObservedRunningTime="2026-02-27 16:45:56.819263245 +0000 UTC m=+1318.966277692" Feb 27 16:45:57 crc kubenswrapper[4751]: I0227 16:45:57.255686 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Feb 27 16:45:57 crc kubenswrapper[4751]: I0227 16:45:57.365791 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-cjpbx"] Feb 27 16:45:57 crc kubenswrapper[4751]: I0227 16:45:57.397124 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-698758b865-6px47"] Feb 27 16:45:57 crc kubenswrapper[4751]: I0227 16:45:57.408010 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-6px47" Feb 27 16:45:57 crc kubenswrapper[4751]: I0227 16:45:57.422514 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-6px47"] Feb 27 16:45:57 crc kubenswrapper[4751]: I0227 16:45:57.502143 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-config\") pod \"dnsmasq-dns-698758b865-6px47\" (UID: \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\") " pod="openstack/dnsmasq-dns-698758b865-6px47" Feb 27 16:45:57 crc kubenswrapper[4751]: I0227 16:45:57.502201 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-6px47\" (UID: \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\") " pod="openstack/dnsmasq-dns-698758b865-6px47" Feb 27 16:45:57 crc kubenswrapper[4751]: I0227 16:45:57.502253 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-dns-svc\") pod \"dnsmasq-dns-698758b865-6px47\" (UID: \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\") " pod="openstack/dnsmasq-dns-698758b865-6px47" Feb 27 16:45:57 crc kubenswrapper[4751]: I0227 16:45:57.502278 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64shn\" (UniqueName: \"kubernetes.io/projected/c68647f7-0ae5-4339-9449-b492f1e3b6b9-kube-api-access-64shn\") pod \"dnsmasq-dns-698758b865-6px47\" (UID: \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\") " pod="openstack/dnsmasq-dns-698758b865-6px47" Feb 27 16:45:57 crc kubenswrapper[4751]: I0227 16:45:57.502298 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-6px47\" (UID: \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\") " pod="openstack/dnsmasq-dns-698758b865-6px47" Feb 27 16:45:57 crc kubenswrapper[4751]: I0227 16:45:57.603323 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64shn\" (UniqueName: \"kubernetes.io/projected/c68647f7-0ae5-4339-9449-b492f1e3b6b9-kube-api-access-64shn\") pod \"dnsmasq-dns-698758b865-6px47\" (UID: \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\") " pod="openstack/dnsmasq-dns-698758b865-6px47" Feb 27 16:45:57 crc kubenswrapper[4751]: I0227 16:45:57.603393 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-6px47\" (UID: \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\") " pod="openstack/dnsmasq-dns-698758b865-6px47" Feb 27 16:45:57 crc kubenswrapper[4751]: I0227 16:45:57.603563 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-config\") pod \"dnsmasq-dns-698758b865-6px47\" (UID: \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\") " pod="openstack/dnsmasq-dns-698758b865-6px47" Feb 27 16:45:57 crc kubenswrapper[4751]: I0227 16:45:57.603639 4751 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-6px47\" (UID: \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\") " pod="openstack/dnsmasq-dns-698758b865-6px47" Feb 27 16:45:57 crc kubenswrapper[4751]: I0227 16:45:57.603681 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-dns-svc\") pod \"dnsmasq-dns-698758b865-6px47\" (UID: \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\") " pod="openstack/dnsmasq-dns-698758b865-6px47" Feb 27 16:45:57 crc kubenswrapper[4751]: I0227 16:45:57.604694 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-dns-svc\") pod \"dnsmasq-dns-698758b865-6px47\" (UID: \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\") " pod="openstack/dnsmasq-dns-698758b865-6px47" Feb 27 16:45:57 crc kubenswrapper[4751]: I0227 16:45:57.604772 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-config\") pod \"dnsmasq-dns-698758b865-6px47\" (UID: \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\") " pod="openstack/dnsmasq-dns-698758b865-6px47" Feb 27 16:45:57 crc kubenswrapper[4751]: I0227 16:45:57.605171 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-6px47\" (UID: \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\") " pod="openstack/dnsmasq-dns-698758b865-6px47" Feb 27 16:45:57 crc kubenswrapper[4751]: I0227 16:45:57.605310 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-6px47\" (UID: \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\") " pod="openstack/dnsmasq-dns-698758b865-6px47" Feb 27 16:45:57 crc kubenswrapper[4751]: I0227 16:45:57.623968 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64shn\" (UniqueName: \"kubernetes.io/projected/c68647f7-0ae5-4339-9449-b492f1e3b6b9-kube-api-access-64shn\") pod \"dnsmasq-dns-698758b865-6px47\" (UID: \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\") " pod="openstack/dnsmasq-dns-698758b865-6px47" Feb 27 16:45:57 crc kubenswrapper[4751]: I0227 16:45:57.732009 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-6px47" Feb 27 16:45:57 crc kubenswrapper[4751]: I0227 16:45:57.803988 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Feb 27 16:45:57 crc kubenswrapper[4751]: I0227 16:45:57.804914 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" podUID="09e809dd-dac1-46e4-a554-938e8eb7ccd5" containerName="dnsmasq-dns" containerID="cri-o://7fcbcf56afa73c01200f92f75881c3d35ddcd67ebc19031407fb703a94a25735" gracePeriod=10 Feb 27 16:45:58 crc kubenswrapper[4751]: W0227 16:45:58.191011 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc68647f7_0ae5_4339_9449_b492f1e3b6b9.slice/crio-af563aa031123af53af22f855ede0fdfab46ea7eaf69ed76dc0093648e4c64ef WatchSource:0}: Error finding container af563aa031123af53af22f855ede0fdfab46ea7eaf69ed76dc0093648e4c64ef: Status 404 returned error can't find the container with id af563aa031123af53af22f855ede0fdfab46ea7eaf69ed76dc0093648e4c64ef Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.193981 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-6px47"] Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.316786 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.416227 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-config\") pod \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\" (UID: \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\") " Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.416280 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-dns-svc\") pod \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\" (UID: \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\") " Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.416341 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-54qj2\" (UniqueName: \"kubernetes.io/projected/09e809dd-dac1-46e4-a554-938e8eb7ccd5-kube-api-access-54qj2\") pod \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\" (UID: \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\") " Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.416377 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-ovsdbserver-sb\") pod \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\" (UID: \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\") " Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.416599 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-ovsdbserver-nb\") pod \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\" (UID: \"09e809dd-dac1-46e4-a554-938e8eb7ccd5\") " Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.422470 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09e809dd-dac1-46e4-a554-938e8eb7ccd5-kube-api-access-54qj2" (OuterVolumeSpecName: "kube-api-access-54qj2") pod "09e809dd-dac1-46e4-a554-938e8eb7ccd5" 
(UID: "09e809dd-dac1-46e4-a554-938e8eb7ccd5"). InnerVolumeSpecName "kube-api-access-54qj2". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.460623 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "09e809dd-dac1-46e4-a554-938e8eb7ccd5" (UID: "09e809dd-dac1-46e4-a554-938e8eb7ccd5"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.461939 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Feb 27 16:45:58 crc kubenswrapper[4751]: E0227 16:45:58.462249 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09e809dd-dac1-46e4-a554-938e8eb7ccd5" containerName="dnsmasq-dns" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.462266 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="09e809dd-dac1-46e4-a554-938e8eb7ccd5" containerName="dnsmasq-dns" Feb 27 16:45:58 crc kubenswrapper[4751]: E0227 16:45:58.462299 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09e809dd-dac1-46e4-a554-938e8eb7ccd5" containerName="init" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.462306 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="09e809dd-dac1-46e4-a554-938e8eb7ccd5" containerName="init" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.462491 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="09e809dd-dac1-46e4-a554-938e8eb7ccd5" containerName="dnsmasq-dns" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.463770 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "09e809dd-dac1-46e4-a554-938e8eb7ccd5" (UID: "09e809dd-dac1-46e4-a554-938e8eb7ccd5"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.464606 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "09e809dd-dac1-46e4-a554-938e8eb7ccd5" (UID: "09e809dd-dac1-46e4-a554-938e8eb7ccd5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.464860 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-config" (OuterVolumeSpecName: "config") pod "09e809dd-dac1-46e4-a554-938e8eb7ccd5" (UID: "09e809dd-dac1-46e4-a554-938e8eb7ccd5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.483551 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.506724 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-jglhv" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.508240 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.509580 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.509774 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.510017 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.518476 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.518534 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.518551 4751 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.518563 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54qj2\" (UniqueName: \"kubernetes.io/projected/09e809dd-dac1-46e4-a554-938e8eb7ccd5-kube-api-access-54qj2\") on node \"crc\" DevicePath \"\"" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.518575 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09e809dd-dac1-46e4-a554-938e8eb7ccd5-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.620349 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-lock\") pod \"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " pod="openstack/swift-storage-0" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.620644 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-etc-swift\") pod \"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " pod="openstack/swift-storage-0" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.620696 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-cache\") pod \"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " pod="openstack/swift-storage-0" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.621253 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod 
\"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " pod="openstack/swift-storage-0" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.621294 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " pod="openstack/swift-storage-0" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.621330 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zttb\" (UniqueName: \"kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-kube-api-access-7zttb\") pod \"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " pod="openstack/swift-storage-0" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.722753 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " pod="openstack/swift-storage-0" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.722814 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " pod="openstack/swift-storage-0" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.722847 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zttb\" (UniqueName: \"kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-kube-api-access-7zttb\") pod \"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " pod="openstack/swift-storage-0" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.722919 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-lock\") pod \"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " pod="openstack/swift-storage-0" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.722956 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-etc-swift\") pod \"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " pod="openstack/swift-storage-0" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.722999 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-cache\") pod \"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " pod="openstack/swift-storage-0" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.723535 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-cache\") pod \"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " pod="openstack/swift-storage-0" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.723807 4751 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/swift-storage-0" Feb 27 16:45:58 crc kubenswrapper[4751]: E0227 16:45:58.727736 4751 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Feb 27 16:45:58 crc kubenswrapper[4751]: E0227 16:45:58.727788 4751 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Feb 27 16:45:58 crc kubenswrapper[4751]: E0227 16:45:58.727872 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-etc-swift podName:2fd9f1bc-399b-4282-a2cf-b76526fcfca5 nodeName:}" failed. No retries permitted until 2026-02-27 16:45:59.227844879 +0000 UTC m=+1321.374859386 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-etc-swift") pod "swift-storage-0" (UID: "2fd9f1bc-399b-4282-a2cf-b76526fcfca5") : configmap "swift-ring-files" not found Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.727979 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-lock\") pod \"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " pod="openstack/swift-storage-0" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.734170 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " pod="openstack/swift-storage-0" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.748503 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zttb\" (UniqueName: \"kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-kube-api-access-7zttb\") pod \"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " pod="openstack/swift-storage-0" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.750044 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " pod="openstack/swift-storage-0" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.812863 4751 generic.go:334] "Generic (PLEG): container finished" podID="c68647f7-0ae5-4339-9449-b492f1e3b6b9" containerID="0deb72468fc72b012f613e715480ffe7175bc19240a8fea4df9f160b1950ec71" exitCode=0 Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.812943 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-6px47" event={"ID":"c68647f7-0ae5-4339-9449-b492f1e3b6b9","Type":"ContainerDied","Data":"0deb72468fc72b012f613e715480ffe7175bc19240a8fea4df9f160b1950ec71"} Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.812972 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-6px47" event={"ID":"c68647f7-0ae5-4339-9449-b492f1e3b6b9","Type":"ContainerStarted","Data":"af563aa031123af53af22f855ede0fdfab46ea7eaf69ed76dc0093648e4c64ef"} Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 
16:45:58.814997 4751 generic.go:334] "Generic (PLEG): container finished" podID="09e809dd-dac1-46e4-a554-938e8eb7ccd5" containerID="7fcbcf56afa73c01200f92f75881c3d35ddcd67ebc19031407fb703a94a25735" exitCode=0 Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.815067 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" event={"ID":"09e809dd-dac1-46e4-a554-938e8eb7ccd5","Type":"ContainerDied","Data":"7fcbcf56afa73c01200f92f75881c3d35ddcd67ebc19031407fb703a94a25735"} Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.815119 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" event={"ID":"09e809dd-dac1-46e4-a554-938e8eb7ccd5","Type":"ContainerDied","Data":"332c07fa125dcc1ba669fed5d2c6d9f2e596947bacd72db63e4c67e1bfe9a653"} Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.815147 4751 scope.go:117] "RemoveContainer" containerID="7fcbcf56afa73c01200f92f75881c3d35ddcd67ebc19031407fb703a94a25735" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.815084 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-cjpbx" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.835297 4751 scope.go:117] "RemoveContainer" containerID="7be5b397b4f24ae2c1a739e69575924ac569d695680d757b4df2a85168f8c95c" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.860980 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-cjpbx"] Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.861008 4751 scope.go:117] "RemoveContainer" containerID="7fcbcf56afa73c01200f92f75881c3d35ddcd67ebc19031407fb703a94a25735" Feb 27 16:45:58 crc kubenswrapper[4751]: E0227 16:45:58.861337 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7fcbcf56afa73c01200f92f75881c3d35ddcd67ebc19031407fb703a94a25735\": container with ID starting with 7fcbcf56afa73c01200f92f75881c3d35ddcd67ebc19031407fb703a94a25735 not found: ID does not exist" containerID="7fcbcf56afa73c01200f92f75881c3d35ddcd67ebc19031407fb703a94a25735" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.861372 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7fcbcf56afa73c01200f92f75881c3d35ddcd67ebc19031407fb703a94a25735"} err="failed to get container status \"7fcbcf56afa73c01200f92f75881c3d35ddcd67ebc19031407fb703a94a25735\": rpc error: code = NotFound desc = could not find container \"7fcbcf56afa73c01200f92f75881c3d35ddcd67ebc19031407fb703a94a25735\": container with ID starting with 7fcbcf56afa73c01200f92f75881c3d35ddcd67ebc19031407fb703a94a25735 not found: ID does not exist" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.861392 4751 scope.go:117] "RemoveContainer" containerID="7be5b397b4f24ae2c1a739e69575924ac569d695680d757b4df2a85168f8c95c" Feb 27 16:45:58 crc kubenswrapper[4751]: E0227 16:45:58.861727 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7be5b397b4f24ae2c1a739e69575924ac569d695680d757b4df2a85168f8c95c\": container with ID starting with 7be5b397b4f24ae2c1a739e69575924ac569d695680d757b4df2a85168f8c95c not found: ID does not exist" containerID="7be5b397b4f24ae2c1a739e69575924ac569d695680d757b4df2a85168f8c95c" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.861759 4751 pod_container_deletor.go:53] "DeleteContainer returned 
error" containerID={"Type":"cri-o","ID":"7be5b397b4f24ae2c1a739e69575924ac569d695680d757b4df2a85168f8c95c"} err="failed to get container status \"7be5b397b4f24ae2c1a739e69575924ac569d695680d757b4df2a85168f8c95c\": rpc error: code = NotFound desc = could not find container \"7be5b397b4f24ae2c1a739e69575924ac569d695680d757b4df2a85168f8c95c\": container with ID starting with 7be5b397b4f24ae2c1a739e69575924ac569d695680d757b4df2a85168f8c95c not found: ID does not exist" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.868676 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-cjpbx"] Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.918903 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.918968 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.971462 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-sz5bd"] Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.972740 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.975308 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.976168 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.977521 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Feb 27 16:45:58 crc kubenswrapper[4751]: I0227 16:45:58.980342 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-sz5bd"] Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.030466 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-swiftconf\") pod \"swift-ring-rebalance-sz5bd\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.030520 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-ring-data-devices\") pod \"swift-ring-rebalance-sz5bd\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.030605 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-scripts\") pod \"swift-ring-rebalance-sz5bd\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " 
pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.030668 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-dispersionconf\") pod \"swift-ring-rebalance-sz5bd\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.030694 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-combined-ca-bundle\") pod \"swift-ring-rebalance-sz5bd\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.030719 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-etc-swift\") pod \"swift-ring-rebalance-sz5bd\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.030815 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rp5f\" (UniqueName: \"kubernetes.io/projected/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-kube-api-access-7rp5f\") pod \"swift-ring-rebalance-sz5bd\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.136583 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-scripts\") pod \"swift-ring-rebalance-sz5bd\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.136678 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-dispersionconf\") pod \"swift-ring-rebalance-sz5bd\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.136708 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-combined-ca-bundle\") pod \"swift-ring-rebalance-sz5bd\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.136734 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-etc-swift\") pod \"swift-ring-rebalance-sz5bd\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.136821 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rp5f\" (UniqueName: \"kubernetes.io/projected/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-kube-api-access-7rp5f\") pod \"swift-ring-rebalance-sz5bd\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " 
pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.136873 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-swiftconf\") pod \"swift-ring-rebalance-sz5bd\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.136904 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-ring-data-devices\") pod \"swift-ring-rebalance-sz5bd\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.137650 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-ring-data-devices\") pod \"swift-ring-rebalance-sz5bd\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.138207 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-scripts\") pod \"swift-ring-rebalance-sz5bd\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.139224 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-etc-swift\") pod \"swift-ring-rebalance-sz5bd\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.142895 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-combined-ca-bundle\") pod \"swift-ring-rebalance-sz5bd\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.143165 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-dispersionconf\") pod \"swift-ring-rebalance-sz5bd\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.143884 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-swiftconf\") pod \"swift-ring-rebalance-sz5bd\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.158454 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rp5f\" (UniqueName: \"kubernetes.io/projected/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-kube-api-access-7rp5f\") pod \"swift-ring-rebalance-sz5bd\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.238369 4751 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-etc-swift\") pod \"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " pod="openstack/swift-storage-0" Feb 27 16:45:59 crc kubenswrapper[4751]: E0227 16:45:59.238633 4751 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Feb 27 16:45:59 crc kubenswrapper[4751]: E0227 16:45:59.238676 4751 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Feb 27 16:45:59 crc kubenswrapper[4751]: E0227 16:45:59.238779 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-etc-swift podName:2fd9f1bc-399b-4282-a2cf-b76526fcfca5 nodeName:}" failed. No retries permitted until 2026-02-27 16:46:00.238748476 +0000 UTC m=+1322.385762953 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-etc-swift") pod "swift-storage-0" (UID: "2fd9f1bc-399b-4282-a2cf-b76526fcfca5") : configmap "swift-ring-files" not found Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.295233 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.769060 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-sz5bd"] Feb 27 16:45:59 crc kubenswrapper[4751]: W0227 16:45:59.774607 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod10cb8075_6c76_438d_8ba7_cacfb6acd7fe.slice/crio-2f647e01f24758f35870a8526b0bd45e64e81b0d6ff029967b195ca0a50b95e4 WatchSource:0}: Error finding container 2f647e01f24758f35870a8526b0bd45e64e81b0d6ff029967b195ca0a50b95e4: Status 404 returned error can't find the container with id 2f647e01f24758f35870a8526b0bd45e64e81b0d6ff029967b195ca0a50b95e4 Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.826953 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-6px47" event={"ID":"c68647f7-0ae5-4339-9449-b492f1e3b6b9","Type":"ContainerStarted","Data":"baa27114786c2636b41d3933ceb7f4a7e53697beb788d8c6d80f5665a61be24e"} Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.827787 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-6px47" Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.829622 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-sz5bd" event={"ID":"10cb8075-6c76-438d-8ba7-cacfb6acd7fe","Type":"ContainerStarted","Data":"2f647e01f24758f35870a8526b0bd45e64e81b0d6ff029967b195ca0a50b95e4"} Feb 27 16:45:59 crc kubenswrapper[4751]: I0227 16:45:59.856461 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-698758b865-6px47" podStartSLOduration=2.856443467 podStartE2EDuration="2.856443467s" podCreationTimestamp="2026-02-27 16:45:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:45:59.850368615 +0000 UTC m=+1321.997383062" watchObservedRunningTime="2026-02-27 16:45:59.856443467 +0000 UTC m=+1322.003457914" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 
16:46:00.158943 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536846-d7tbv"] Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.160426 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536846-d7tbv" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.165283 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.165642 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.165786 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.167642 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536846-d7tbv"] Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.263550 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-etc-swift\") pod \"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " pod="openstack/swift-storage-0" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.263682 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkg9q\" (UniqueName: \"kubernetes.io/projected/9ff1d6b8-692b-4578-91a6-69cda33c3b50-kube-api-access-hkg9q\") pod \"auto-csr-approver-29536846-d7tbv\" (UID: \"9ff1d6b8-692b-4578-91a6-69cda33c3b50\") " pod="openshift-infra/auto-csr-approver-29536846-d7tbv" Feb 27 16:46:00 crc kubenswrapper[4751]: E0227 16:46:00.263747 4751 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Feb 27 16:46:00 crc kubenswrapper[4751]: E0227 16:46:00.263773 4751 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Feb 27 16:46:00 crc kubenswrapper[4751]: E0227 16:46:00.263826 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-etc-swift podName:2fd9f1bc-399b-4282-a2cf-b76526fcfca5 nodeName:}" failed. No retries permitted until 2026-02-27 16:46:02.263807399 +0000 UTC m=+1324.410821846 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-etc-swift") pod "swift-storage-0" (UID: "2fd9f1bc-399b-4282-a2cf-b76526fcfca5") : configmap "swift-ring-files" not found Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.365874 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkg9q\" (UniqueName: \"kubernetes.io/projected/9ff1d6b8-692b-4578-91a6-69cda33c3b50-kube-api-access-hkg9q\") pod \"auto-csr-approver-29536846-d7tbv\" (UID: \"9ff1d6b8-692b-4578-91a6-69cda33c3b50\") " pod="openshift-infra/auto-csr-approver-29536846-d7tbv" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.385287 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hkg9q\" (UniqueName: \"kubernetes.io/projected/9ff1d6b8-692b-4578-91a6-69cda33c3b50-kube-api-access-hkg9q\") pod \"auto-csr-approver-29536846-d7tbv\" (UID: \"9ff1d6b8-692b-4578-91a6-69cda33c3b50\") " pod="openshift-infra/auto-csr-approver-29536846-d7tbv" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.485177 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536846-d7tbv" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.531634 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09e809dd-dac1-46e4-a554-938e8eb7ccd5" path="/var/lib/kubelet/pods/09e809dd-dac1-46e4-a554-938e8eb7ccd5/volumes" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.636335 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-qxvzf"] Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.637394 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-qxvzf" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.657319 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-543a-account-create-update-5lwvv"] Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.658448 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-543a-account-create-update-5lwvv" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.661274 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.666428 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-qxvzf"] Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.671895 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0404436e-b5d2-4743-9d81-5bd0bab5b1d5-operator-scripts\") pod \"glance-db-create-qxvzf\" (UID: \"0404436e-b5d2-4743-9d81-5bd0bab5b1d5\") " pod="openstack/glance-db-create-qxvzf" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.671983 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6t2w\" (UniqueName: \"kubernetes.io/projected/0404436e-b5d2-4743-9d81-5bd0bab5b1d5-kube-api-access-q6t2w\") pod \"glance-db-create-qxvzf\" (UID: \"0404436e-b5d2-4743-9d81-5bd0bab5b1d5\") " pod="openstack/glance-db-create-qxvzf" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.674332 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-543a-account-create-update-5lwvv"] Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.773835 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/92d68198-8212-48d0-b42a-1be37cc135f2-operator-scripts\") pod \"glance-543a-account-create-update-5lwvv\" (UID: \"92d68198-8212-48d0-b42a-1be37cc135f2\") " pod="openstack/glance-543a-account-create-update-5lwvv" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.773869 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wz9hs\" (UniqueName: \"kubernetes.io/projected/92d68198-8212-48d0-b42a-1be37cc135f2-kube-api-access-wz9hs\") pod \"glance-543a-account-create-update-5lwvv\" (UID: \"92d68198-8212-48d0-b42a-1be37cc135f2\") " pod="openstack/glance-543a-account-create-update-5lwvv" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.773929 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0404436e-b5d2-4743-9d81-5bd0bab5b1d5-operator-scripts\") pod \"glance-db-create-qxvzf\" (UID: \"0404436e-b5d2-4743-9d81-5bd0bab5b1d5\") " pod="openstack/glance-db-create-qxvzf" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.774198 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q6t2w\" (UniqueName: \"kubernetes.io/projected/0404436e-b5d2-4743-9d81-5bd0bab5b1d5-kube-api-access-q6t2w\") pod \"glance-db-create-qxvzf\" (UID: \"0404436e-b5d2-4743-9d81-5bd0bab5b1d5\") " pod="openstack/glance-db-create-qxvzf" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.774552 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0404436e-b5d2-4743-9d81-5bd0bab5b1d5-operator-scripts\") pod \"glance-db-create-qxvzf\" (UID: \"0404436e-b5d2-4743-9d81-5bd0bab5b1d5\") " pod="openstack/glance-db-create-qxvzf" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.798419 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-q6t2w\" (UniqueName: \"kubernetes.io/projected/0404436e-b5d2-4743-9d81-5bd0bab5b1d5-kube-api-access-q6t2w\") pod \"glance-db-create-qxvzf\" (UID: \"0404436e-b5d2-4743-9d81-5bd0bab5b1d5\") " pod="openstack/glance-db-create-qxvzf" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.875755 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/92d68198-8212-48d0-b42a-1be37cc135f2-operator-scripts\") pod \"glance-543a-account-create-update-5lwvv\" (UID: \"92d68198-8212-48d0-b42a-1be37cc135f2\") " pod="openstack/glance-543a-account-create-update-5lwvv" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.876284 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wz9hs\" (UniqueName: \"kubernetes.io/projected/92d68198-8212-48d0-b42a-1be37cc135f2-kube-api-access-wz9hs\") pod \"glance-543a-account-create-update-5lwvv\" (UID: \"92d68198-8212-48d0-b42a-1be37cc135f2\") " pod="openstack/glance-543a-account-create-update-5lwvv" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.876678 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/92d68198-8212-48d0-b42a-1be37cc135f2-operator-scripts\") pod \"glance-543a-account-create-update-5lwvv\" (UID: \"92d68198-8212-48d0-b42a-1be37cc135f2\") " pod="openstack/glance-543a-account-create-update-5lwvv" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.898132 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wz9hs\" (UniqueName: \"kubernetes.io/projected/92d68198-8212-48d0-b42a-1be37cc135f2-kube-api-access-wz9hs\") pod \"glance-543a-account-create-update-5lwvv\" (UID: \"92d68198-8212-48d0-b42a-1be37cc135f2\") " pod="openstack/glance-543a-account-create-update-5lwvv" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.942998 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536846-d7tbv"] Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.959482 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-qxvzf" Feb 27 16:46:00 crc kubenswrapper[4751]: I0227 16:46:00.974703 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-543a-account-create-update-5lwvv" Feb 27 16:46:01 crc kubenswrapper[4751]: I0227 16:46:01.994746 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-7cx8p"] Feb 27 16:46:01 crc kubenswrapper[4751]: I0227 16:46:01.997188 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-7cx8p" Feb 27 16:46:02 crc kubenswrapper[4751]: I0227 16:46:02.001881 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-7cx8p"] Feb 27 16:46:02 crc kubenswrapper[4751]: I0227 16:46:02.004216 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Feb 27 16:46:02 crc kubenswrapper[4751]: I0227 16:46:02.097472 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2844fba2-ba90-41a6-a4d6-201c6a982417-operator-scripts\") pod \"root-account-create-update-7cx8p\" (UID: \"2844fba2-ba90-41a6-a4d6-201c6a982417\") " pod="openstack/root-account-create-update-7cx8p" Feb 27 16:46:02 crc kubenswrapper[4751]: I0227 16:46:02.097543 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qlv9s\" (UniqueName: \"kubernetes.io/projected/2844fba2-ba90-41a6-a4d6-201c6a982417-kube-api-access-qlv9s\") pod \"root-account-create-update-7cx8p\" (UID: \"2844fba2-ba90-41a6-a4d6-201c6a982417\") " pod="openstack/root-account-create-update-7cx8p" Feb 27 16:46:02 crc kubenswrapper[4751]: I0227 16:46:02.199301 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2844fba2-ba90-41a6-a4d6-201c6a982417-operator-scripts\") pod \"root-account-create-update-7cx8p\" (UID: \"2844fba2-ba90-41a6-a4d6-201c6a982417\") " pod="openstack/root-account-create-update-7cx8p" Feb 27 16:46:02 crc kubenswrapper[4751]: I0227 16:46:02.199823 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlv9s\" (UniqueName: \"kubernetes.io/projected/2844fba2-ba90-41a6-a4d6-201c6a982417-kube-api-access-qlv9s\") pod \"root-account-create-update-7cx8p\" (UID: \"2844fba2-ba90-41a6-a4d6-201c6a982417\") " pod="openstack/root-account-create-update-7cx8p" Feb 27 16:46:02 crc kubenswrapper[4751]: I0227 16:46:02.201425 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2844fba2-ba90-41a6-a4d6-201c6a982417-operator-scripts\") pod \"root-account-create-update-7cx8p\" (UID: \"2844fba2-ba90-41a6-a4d6-201c6a982417\") " pod="openstack/root-account-create-update-7cx8p" Feb 27 16:46:02 crc kubenswrapper[4751]: I0227 16:46:02.235822 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qlv9s\" (UniqueName: \"kubernetes.io/projected/2844fba2-ba90-41a6-a4d6-201c6a982417-kube-api-access-qlv9s\") pod \"root-account-create-update-7cx8p\" (UID: \"2844fba2-ba90-41a6-a4d6-201c6a982417\") " pod="openstack/root-account-create-update-7cx8p" Feb 27 16:46:02 crc kubenswrapper[4751]: I0227 16:46:02.301141 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-etc-swift\") pod \"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " pod="openstack/swift-storage-0" Feb 27 16:46:02 crc kubenswrapper[4751]: E0227 16:46:02.301297 4751 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Feb 27 16:46:02 crc kubenswrapper[4751]: E0227 16:46:02.301311 4751 projected.go:194] Error preparing data for projected volume etc-swift for pod 
openstack/swift-storage-0: configmap "swift-ring-files" not found Feb 27 16:46:02 crc kubenswrapper[4751]: E0227 16:46:02.301360 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-etc-swift podName:2fd9f1bc-399b-4282-a2cf-b76526fcfca5 nodeName:}" failed. No retries permitted until 2026-02-27 16:46:06.301345367 +0000 UTC m=+1328.448359814 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-etc-swift") pod "swift-storage-0" (UID: "2fd9f1bc-399b-4282-a2cf-b76526fcfca5") : configmap "swift-ring-files" not found Feb 27 16:46:02 crc kubenswrapper[4751]: I0227 16:46:02.329061 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-7cx8p" Feb 27 16:46:02 crc kubenswrapper[4751]: W0227 16:46:02.725597 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9ff1d6b8_692b_4578_91a6_69cda33c3b50.slice/crio-bc489d75e076edd01771dfd2ebf6e4126d4cb0374acbb7c46ac19ed7f24b5ef3 WatchSource:0}: Error finding container bc489d75e076edd01771dfd2ebf6e4126d4cb0374acbb7c46ac19ed7f24b5ef3: Status 404 returned error can't find the container with id bc489d75e076edd01771dfd2ebf6e4126d4cb0374acbb7c46ac19ed7f24b5ef3 Feb 27 16:46:02 crc kubenswrapper[4751]: I0227 16:46:02.920667 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536846-d7tbv" event={"ID":"9ff1d6b8-692b-4578-91a6-69cda33c3b50","Type":"ContainerStarted","Data":"bc489d75e076edd01771dfd2ebf6e4126d4cb0374acbb7c46ac19ed7f24b5ef3"} Feb 27 16:46:03 crc kubenswrapper[4751]: I0227 16:46:03.244887 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-qxvzf"] Feb 27 16:46:03 crc kubenswrapper[4751]: W0227 16:46:03.250542 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0404436e_b5d2_4743_9d81_5bd0bab5b1d5.slice/crio-f4b91719fa6af98c406850b6b4f88aa47e020bdb7ec28127e59b385ef3c344f8 WatchSource:0}: Error finding container f4b91719fa6af98c406850b6b4f88aa47e020bdb7ec28127e59b385ef3c344f8: Status 404 returned error can't find the container with id f4b91719fa6af98c406850b6b4f88aa47e020bdb7ec28127e59b385ef3c344f8 Feb 27 16:46:03 crc kubenswrapper[4751]: I0227 16:46:03.311087 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-7cx8p"] Feb 27 16:46:03 crc kubenswrapper[4751]: I0227 16:46:03.323847 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-543a-account-create-update-5lwvv"] Feb 27 16:46:03 crc kubenswrapper[4751]: W0227 16:46:03.324458 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2844fba2_ba90_41a6_a4d6_201c6a982417.slice/crio-115e4fca7fd6313121e955aac28206f388c02205a76f4672d05b98b8f9f5f7ad WatchSource:0}: Error finding container 115e4fca7fd6313121e955aac28206f388c02205a76f4672d05b98b8f9f5f7ad: Status 404 returned error can't find the container with id 115e4fca7fd6313121e955aac28206f388c02205a76f4672d05b98b8f9f5f7ad Feb 27 16:46:03 crc kubenswrapper[4751]: I0227 16:46:03.933593 4751 generic.go:334] "Generic (PLEG): container finished" podID="0404436e-b5d2-4743-9d81-5bd0bab5b1d5" 
containerID="a6ae7a3398231202d27c74508110644707211b970edd59282375775aa71a40be" exitCode=0 Feb 27 16:46:03 crc kubenswrapper[4751]: I0227 16:46:03.933753 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-qxvzf" event={"ID":"0404436e-b5d2-4743-9d81-5bd0bab5b1d5","Type":"ContainerDied","Data":"a6ae7a3398231202d27c74508110644707211b970edd59282375775aa71a40be"} Feb 27 16:46:03 crc kubenswrapper[4751]: I0227 16:46:03.934006 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-qxvzf" event={"ID":"0404436e-b5d2-4743-9d81-5bd0bab5b1d5","Type":"ContainerStarted","Data":"f4b91719fa6af98c406850b6b4f88aa47e020bdb7ec28127e59b385ef3c344f8"} Feb 27 16:46:03 crc kubenswrapper[4751]: I0227 16:46:03.941162 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-sz5bd" event={"ID":"10cb8075-6c76-438d-8ba7-cacfb6acd7fe","Type":"ContainerStarted","Data":"8be581f350c0a6c6b3215f93ef0de633555618e7b76dc24371e057e35a5fd5fa"} Feb 27 16:46:03 crc kubenswrapper[4751]: I0227 16:46:03.945261 4751 generic.go:334] "Generic (PLEG): container finished" podID="92d68198-8212-48d0-b42a-1be37cc135f2" containerID="3fd7fd20d2274bc9adfa8baf3cba840811104b1d4341f4424d646b93404725f3" exitCode=0 Feb 27 16:46:03 crc kubenswrapper[4751]: I0227 16:46:03.945360 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-543a-account-create-update-5lwvv" event={"ID":"92d68198-8212-48d0-b42a-1be37cc135f2","Type":"ContainerDied","Data":"3fd7fd20d2274bc9adfa8baf3cba840811104b1d4341f4424d646b93404725f3"} Feb 27 16:46:03 crc kubenswrapper[4751]: I0227 16:46:03.945396 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-543a-account-create-update-5lwvv" event={"ID":"92d68198-8212-48d0-b42a-1be37cc135f2","Type":"ContainerStarted","Data":"620b24a4ecfe2a31cb2084c5a83688d9a5dea2d2dd68a83a3d35f4a2b02651dd"} Feb 27 16:46:03 crc kubenswrapper[4751]: I0227 16:46:03.951555 4751 generic.go:334] "Generic (PLEG): container finished" podID="2844fba2-ba90-41a6-a4d6-201c6a982417" containerID="8d7ec819993aafb99d822867ec58a5121e57f592645ec15f5a71d216c630dc8b" exitCode=0 Feb 27 16:46:03 crc kubenswrapper[4751]: I0227 16:46:03.951602 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-7cx8p" event={"ID":"2844fba2-ba90-41a6-a4d6-201c6a982417","Type":"ContainerDied","Data":"8d7ec819993aafb99d822867ec58a5121e57f592645ec15f5a71d216c630dc8b"} Feb 27 16:46:03 crc kubenswrapper[4751]: I0227 16:46:03.951624 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-7cx8p" event={"ID":"2844fba2-ba90-41a6-a4d6-201c6a982417","Type":"ContainerStarted","Data":"115e4fca7fd6313121e955aac28206f388c02205a76f4672d05b98b8f9f5f7ad"} Feb 27 16:46:03 crc kubenswrapper[4751]: I0227 16:46:03.971525 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-sz5bd" podStartSLOduration=2.947291117 podStartE2EDuration="5.971509003s" podCreationTimestamp="2026-02-27 16:45:58 +0000 UTC" firstStartedPulling="2026-02-27 16:45:59.778257058 +0000 UTC m=+1321.925271505" lastFinishedPulling="2026-02-27 16:46:02.802474944 +0000 UTC m=+1324.949489391" observedRunningTime="2026-02-27 16:46:03.965637746 +0000 UTC m=+1326.112652193" watchObservedRunningTime="2026-02-27 16:46:03.971509003 +0000 UTC m=+1326.118523440" Feb 27 16:46:04 crc kubenswrapper[4751]: I0227 16:46:04.965045 4751 generic.go:334] "Generic (PLEG): container 
finished" podID="9ff1d6b8-692b-4578-91a6-69cda33c3b50" containerID="e44087f79ab11f751ff8eb2bfe6cb9757dd52869fdc3c821d99e9e86a32e5f6f" exitCode=0 Feb 27 16:46:04 crc kubenswrapper[4751]: I0227 16:46:04.965843 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536846-d7tbv" event={"ID":"9ff1d6b8-692b-4578-91a6-69cda33c3b50","Type":"ContainerDied","Data":"e44087f79ab11f751ff8eb2bfe6cb9757dd52869fdc3c821d99e9e86a32e5f6f"} Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.439083 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-543a-account-create-update-5lwvv" Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.523473 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-7cx8p" Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.542144 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-qxvzf" Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.562700 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2844fba2-ba90-41a6-a4d6-201c6a982417-operator-scripts\") pod \"2844fba2-ba90-41a6-a4d6-201c6a982417\" (UID: \"2844fba2-ba90-41a6-a4d6-201c6a982417\") " Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.562740 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qlv9s\" (UniqueName: \"kubernetes.io/projected/2844fba2-ba90-41a6-a4d6-201c6a982417-kube-api-access-qlv9s\") pod \"2844fba2-ba90-41a6-a4d6-201c6a982417\" (UID: \"2844fba2-ba90-41a6-a4d6-201c6a982417\") " Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.562774 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wz9hs\" (UniqueName: \"kubernetes.io/projected/92d68198-8212-48d0-b42a-1be37cc135f2-kube-api-access-wz9hs\") pod \"92d68198-8212-48d0-b42a-1be37cc135f2\" (UID: \"92d68198-8212-48d0-b42a-1be37cc135f2\") " Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.562810 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/92d68198-8212-48d0-b42a-1be37cc135f2-operator-scripts\") pod \"92d68198-8212-48d0-b42a-1be37cc135f2\" (UID: \"92d68198-8212-48d0-b42a-1be37cc135f2\") " Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.563306 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2844fba2-ba90-41a6-a4d6-201c6a982417-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2844fba2-ba90-41a6-a4d6-201c6a982417" (UID: "2844fba2-ba90-41a6-a4d6-201c6a982417"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.563627 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92d68198-8212-48d0-b42a-1be37cc135f2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "92d68198-8212-48d0-b42a-1be37cc135f2" (UID: "92d68198-8212-48d0-b42a-1be37cc135f2"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.568088 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92d68198-8212-48d0-b42a-1be37cc135f2-kube-api-access-wz9hs" (OuterVolumeSpecName: "kube-api-access-wz9hs") pod "92d68198-8212-48d0-b42a-1be37cc135f2" (UID: "92d68198-8212-48d0-b42a-1be37cc135f2"). InnerVolumeSpecName "kube-api-access-wz9hs". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.568886 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2844fba2-ba90-41a6-a4d6-201c6a982417-kube-api-access-qlv9s" (OuterVolumeSpecName: "kube-api-access-qlv9s") pod "2844fba2-ba90-41a6-a4d6-201c6a982417" (UID: "2844fba2-ba90-41a6-a4d6-201c6a982417"). InnerVolumeSpecName "kube-api-access-qlv9s". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.664008 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0404436e-b5d2-4743-9d81-5bd0bab5b1d5-operator-scripts\") pod \"0404436e-b5d2-4743-9d81-5bd0bab5b1d5\" (UID: \"0404436e-b5d2-4743-9d81-5bd0bab5b1d5\") " Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.664163 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q6t2w\" (UniqueName: \"kubernetes.io/projected/0404436e-b5d2-4743-9d81-5bd0bab5b1d5-kube-api-access-q6t2w\") pod \"0404436e-b5d2-4743-9d81-5bd0bab5b1d5\" (UID: \"0404436e-b5d2-4743-9d81-5bd0bab5b1d5\") " Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.664575 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2844fba2-ba90-41a6-a4d6-201c6a982417-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.664601 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qlv9s\" (UniqueName: \"kubernetes.io/projected/2844fba2-ba90-41a6-a4d6-201c6a982417-kube-api-access-qlv9s\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.664614 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wz9hs\" (UniqueName: \"kubernetes.io/projected/92d68198-8212-48d0-b42a-1be37cc135f2-kube-api-access-wz9hs\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.664626 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/92d68198-8212-48d0-b42a-1be37cc135f2-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.664844 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0404436e-b5d2-4743-9d81-5bd0bab5b1d5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0404436e-b5d2-4743-9d81-5bd0bab5b1d5" (UID: "0404436e-b5d2-4743-9d81-5bd0bab5b1d5"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.668807 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0404436e-b5d2-4743-9d81-5bd0bab5b1d5-kube-api-access-q6t2w" (OuterVolumeSpecName: "kube-api-access-q6t2w") pod "0404436e-b5d2-4743-9d81-5bd0bab5b1d5" (UID: "0404436e-b5d2-4743-9d81-5bd0bab5b1d5"). InnerVolumeSpecName "kube-api-access-q6t2w". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.766475 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0404436e-b5d2-4743-9d81-5bd0bab5b1d5-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.766522 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q6t2w\" (UniqueName: \"kubernetes.io/projected/0404436e-b5d2-4743-9d81-5bd0bab5b1d5-kube-api-access-q6t2w\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.982219 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-543a-account-create-update-5lwvv" event={"ID":"92d68198-8212-48d0-b42a-1be37cc135f2","Type":"ContainerDied","Data":"620b24a4ecfe2a31cb2084c5a83688d9a5dea2d2dd68a83a3d35f4a2b02651dd"} Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.982274 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="620b24a4ecfe2a31cb2084c5a83688d9a5dea2d2dd68a83a3d35f4a2b02651dd" Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.983915 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-543a-account-create-update-5lwvv" Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.985449 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-7cx8p" event={"ID":"2844fba2-ba90-41a6-a4d6-201c6a982417","Type":"ContainerDied","Data":"115e4fca7fd6313121e955aac28206f388c02205a76f4672d05b98b8f9f5f7ad"} Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.985532 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="115e4fca7fd6313121e955aac28206f388c02205a76f4672d05b98b8f9f5f7ad" Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.985476 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-7cx8p" Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.987900 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-qxvzf" Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.987906 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-qxvzf" event={"ID":"0404436e-b5d2-4743-9d81-5bd0bab5b1d5","Type":"ContainerDied","Data":"f4b91719fa6af98c406850b6b4f88aa47e020bdb7ec28127e59b385ef3c344f8"} Feb 27 16:46:05 crc kubenswrapper[4751]: I0227 16:46:05.990371 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f4b91719fa6af98c406850b6b4f88aa47e020bdb7ec28127e59b385ef3c344f8" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.228473 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-6gnd6"] Feb 27 16:46:06 crc kubenswrapper[4751]: E0227 16:46:06.228937 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2844fba2-ba90-41a6-a4d6-201c6a982417" containerName="mariadb-account-create-update" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.228956 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2844fba2-ba90-41a6-a4d6-201c6a982417" containerName="mariadb-account-create-update" Feb 27 16:46:06 crc kubenswrapper[4751]: E0227 16:46:06.228995 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92d68198-8212-48d0-b42a-1be37cc135f2" containerName="mariadb-account-create-update" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.230167 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="92d68198-8212-48d0-b42a-1be37cc135f2" containerName="mariadb-account-create-update" Feb 27 16:46:06 crc kubenswrapper[4751]: E0227 16:46:06.230222 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0404436e-b5d2-4743-9d81-5bd0bab5b1d5" containerName="mariadb-database-create" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.230232 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="0404436e-b5d2-4743-9d81-5bd0bab5b1d5" containerName="mariadb-database-create" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.230473 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="92d68198-8212-48d0-b42a-1be37cc135f2" containerName="mariadb-account-create-update" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.230494 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2844fba2-ba90-41a6-a4d6-201c6a982417" containerName="mariadb-account-create-update" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.230507 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="0404436e-b5d2-4743-9d81-5bd0bab5b1d5" containerName="mariadb-database-create" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.231468 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-6gnd6" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.239801 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-6gnd6"] Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.275532 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4wfb\" (UniqueName: \"kubernetes.io/projected/6f256e1b-bd56-4dd3-a150-7660ab6d222f-kube-api-access-l4wfb\") pod \"keystone-db-create-6gnd6\" (UID: \"6f256e1b-bd56-4dd3-a150-7660ab6d222f\") " pod="openstack/keystone-db-create-6gnd6" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.275587 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f256e1b-bd56-4dd3-a150-7660ab6d222f-operator-scripts\") pod \"keystone-db-create-6gnd6\" (UID: \"6f256e1b-bd56-4dd3-a150-7660ab6d222f\") " pod="openstack/keystone-db-create-6gnd6" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.340122 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-0a1f-account-create-update-xg72f"] Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.344044 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-0a1f-account-create-update-xg72f" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.345606 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.352873 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-0a1f-account-create-update-xg72f"] Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.377104 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0958cc92-4dcb-4e10-b592-b3800bfe7a18-operator-scripts\") pod \"keystone-0a1f-account-create-update-xg72f\" (UID: \"0958cc92-4dcb-4e10-b592-b3800bfe7a18\") " pod="openstack/keystone-0a1f-account-create-update-xg72f" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.377178 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4wfb\" (UniqueName: \"kubernetes.io/projected/6f256e1b-bd56-4dd3-a150-7660ab6d222f-kube-api-access-l4wfb\") pod \"keystone-db-create-6gnd6\" (UID: \"6f256e1b-bd56-4dd3-a150-7660ab6d222f\") " pod="openstack/keystone-db-create-6gnd6" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.377213 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f256e1b-bd56-4dd3-a150-7660ab6d222f-operator-scripts\") pod \"keystone-db-create-6gnd6\" (UID: \"6f256e1b-bd56-4dd3-a150-7660ab6d222f\") " pod="openstack/keystone-db-create-6gnd6" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.377250 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-etc-swift\") pod \"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " pod="openstack/swift-storage-0" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.377274 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8s49\" (UniqueName: 
\"kubernetes.io/projected/0958cc92-4dcb-4e10-b592-b3800bfe7a18-kube-api-access-w8s49\") pod \"keystone-0a1f-account-create-update-xg72f\" (UID: \"0958cc92-4dcb-4e10-b592-b3800bfe7a18\") " pod="openstack/keystone-0a1f-account-create-update-xg72f" Feb 27 16:46:06 crc kubenswrapper[4751]: E0227 16:46:06.377544 4751 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Feb 27 16:46:06 crc kubenswrapper[4751]: E0227 16:46:06.377559 4751 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Feb 27 16:46:06 crc kubenswrapper[4751]: E0227 16:46:06.377603 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-etc-swift podName:2fd9f1bc-399b-4282-a2cf-b76526fcfca5 nodeName:}" failed. No retries permitted until 2026-02-27 16:46:14.377582866 +0000 UTC m=+1336.524597313 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-etc-swift") pod "swift-storage-0" (UID: "2fd9f1bc-399b-4282-a2cf-b76526fcfca5") : configmap "swift-ring-files" not found Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.378258 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f256e1b-bd56-4dd3-a150-7660ab6d222f-operator-scripts\") pod \"keystone-db-create-6gnd6\" (UID: \"6f256e1b-bd56-4dd3-a150-7660ab6d222f\") " pod="openstack/keystone-db-create-6gnd6" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.386706 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536846-d7tbv" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.395609 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4wfb\" (UniqueName: \"kubernetes.io/projected/6f256e1b-bd56-4dd3-a150-7660ab6d222f-kube-api-access-l4wfb\") pod \"keystone-db-create-6gnd6\" (UID: \"6f256e1b-bd56-4dd3-a150-7660ab6d222f\") " pod="openstack/keystone-db-create-6gnd6" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.443891 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-c2k4l"] Feb 27 16:46:06 crc kubenswrapper[4751]: E0227 16:46:06.444189 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ff1d6b8-692b-4578-91a6-69cda33c3b50" containerName="oc" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.444205 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ff1d6b8-692b-4578-91a6-69cda33c3b50" containerName="oc" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.444358 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ff1d6b8-692b-4578-91a6-69cda33c3b50" containerName="oc" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.444887 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-c2k4l" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.456485 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-c2k4l"] Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.478295 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hkg9q\" (UniqueName: \"kubernetes.io/projected/9ff1d6b8-692b-4578-91a6-69cda33c3b50-kube-api-access-hkg9q\") pod \"9ff1d6b8-692b-4578-91a6-69cda33c3b50\" (UID: \"9ff1d6b8-692b-4578-91a6-69cda33c3b50\") " Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.478595 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0958cc92-4dcb-4e10-b592-b3800bfe7a18-operator-scripts\") pod \"keystone-0a1f-account-create-update-xg72f\" (UID: \"0958cc92-4dcb-4e10-b592-b3800bfe7a18\") " pod="openstack/keystone-0a1f-account-create-update-xg72f" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.478630 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88022d82-f750-458c-8f97-a34d3eaef634-operator-scripts\") pod \"placement-db-create-c2k4l\" (UID: \"88022d82-f750-458c-8f97-a34d3eaef634\") " pod="openstack/placement-db-create-c2k4l" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.478695 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8s49\" (UniqueName: \"kubernetes.io/projected/0958cc92-4dcb-4e10-b592-b3800bfe7a18-kube-api-access-w8s49\") pod \"keystone-0a1f-account-create-update-xg72f\" (UID: \"0958cc92-4dcb-4e10-b592-b3800bfe7a18\") " pod="openstack/keystone-0a1f-account-create-update-xg72f" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.478715 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8v2nd\" (UniqueName: \"kubernetes.io/projected/88022d82-f750-458c-8f97-a34d3eaef634-kube-api-access-8v2nd\") pod \"placement-db-create-c2k4l\" (UID: \"88022d82-f750-458c-8f97-a34d3eaef634\") " pod="openstack/placement-db-create-c2k4l" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.479900 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0958cc92-4dcb-4e10-b592-b3800bfe7a18-operator-scripts\") pod \"keystone-0a1f-account-create-update-xg72f\" (UID: \"0958cc92-4dcb-4e10-b592-b3800bfe7a18\") " pod="openstack/keystone-0a1f-account-create-update-xg72f" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.484602 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ff1d6b8-692b-4578-91a6-69cda33c3b50-kube-api-access-hkg9q" (OuterVolumeSpecName: "kube-api-access-hkg9q") pod "9ff1d6b8-692b-4578-91a6-69cda33c3b50" (UID: "9ff1d6b8-692b-4578-91a6-69cda33c3b50"). InnerVolumeSpecName "kube-api-access-hkg9q". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.497281 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8s49\" (UniqueName: \"kubernetes.io/projected/0958cc92-4dcb-4e10-b592-b3800bfe7a18-kube-api-access-w8s49\") pod \"keystone-0a1f-account-create-update-xg72f\" (UID: \"0958cc92-4dcb-4e10-b592-b3800bfe7a18\") " pod="openstack/keystone-0a1f-account-create-update-xg72f" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.519951 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-aa96-account-create-update-d2dtn"] Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.521138 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-aa96-account-create-update-d2dtn" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.522912 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.541513 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-aa96-account-create-update-d2dtn"] Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.560235 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-6gnd6" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.580102 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88022d82-f750-458c-8f97-a34d3eaef634-operator-scripts\") pod \"placement-db-create-c2k4l\" (UID: \"88022d82-f750-458c-8f97-a34d3eaef634\") " pod="openstack/placement-db-create-c2k4l" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.580239 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8v2nd\" (UniqueName: \"kubernetes.io/projected/88022d82-f750-458c-8f97-a34d3eaef634-kube-api-access-8v2nd\") pod \"placement-db-create-c2k4l\" (UID: \"88022d82-f750-458c-8f97-a34d3eaef634\") " pod="openstack/placement-db-create-c2k4l" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.580272 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81347661-57c9-48d8-8b26-c6eddbfe887c-operator-scripts\") pod \"placement-aa96-account-create-update-d2dtn\" (UID: \"81347661-57c9-48d8-8b26-c6eddbfe887c\") " pod="openstack/placement-aa96-account-create-update-d2dtn" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.580302 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpqxw\" (UniqueName: \"kubernetes.io/projected/81347661-57c9-48d8-8b26-c6eddbfe887c-kube-api-access-wpqxw\") pod \"placement-aa96-account-create-update-d2dtn\" (UID: \"81347661-57c9-48d8-8b26-c6eddbfe887c\") " pod="openstack/placement-aa96-account-create-update-d2dtn" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.580364 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hkg9q\" (UniqueName: \"kubernetes.io/projected/9ff1d6b8-692b-4578-91a6-69cda33c3b50-kube-api-access-hkg9q\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.581035 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/88022d82-f750-458c-8f97-a34d3eaef634-operator-scripts\") pod \"placement-db-create-c2k4l\" (UID: \"88022d82-f750-458c-8f97-a34d3eaef634\") " pod="openstack/placement-db-create-c2k4l" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.597924 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8v2nd\" (UniqueName: \"kubernetes.io/projected/88022d82-f750-458c-8f97-a34d3eaef634-kube-api-access-8v2nd\") pod \"placement-db-create-c2k4l\" (UID: \"88022d82-f750-458c-8f97-a34d3eaef634\") " pod="openstack/placement-db-create-c2k4l" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.681512 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpqxw\" (UniqueName: \"kubernetes.io/projected/81347661-57c9-48d8-8b26-c6eddbfe887c-kube-api-access-wpqxw\") pod \"placement-aa96-account-create-update-d2dtn\" (UID: \"81347661-57c9-48d8-8b26-c6eddbfe887c\") " pod="openstack/placement-aa96-account-create-update-d2dtn" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.681918 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81347661-57c9-48d8-8b26-c6eddbfe887c-operator-scripts\") pod \"placement-aa96-account-create-update-d2dtn\" (UID: \"81347661-57c9-48d8-8b26-c6eddbfe887c\") " pod="openstack/placement-aa96-account-create-update-d2dtn" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.683098 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81347661-57c9-48d8-8b26-c6eddbfe887c-operator-scripts\") pod \"placement-aa96-account-create-update-d2dtn\" (UID: \"81347661-57c9-48d8-8b26-c6eddbfe887c\") " pod="openstack/placement-aa96-account-create-update-d2dtn" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.710047 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wpqxw\" (UniqueName: \"kubernetes.io/projected/81347661-57c9-48d8-8b26-c6eddbfe887c-kube-api-access-wpqxw\") pod \"placement-aa96-account-create-update-d2dtn\" (UID: \"81347661-57c9-48d8-8b26-c6eddbfe887c\") " pod="openstack/placement-aa96-account-create-update-d2dtn" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.748729 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-0a1f-account-create-update-xg72f" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.766394 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-c2k4l" Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.874860 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-aa96-account-create-update-d2dtn" Feb 27 16:46:06 crc kubenswrapper[4751]: W0227 16:46:06.979385 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f256e1b_bd56_4dd3_a150_7660ab6d222f.slice/crio-c534113bcf02f9b59164a2ae01eee5aaca0047a87b71fe7b9115219a02af64a7 WatchSource:0}: Error finding container c534113bcf02f9b59164a2ae01eee5aaca0047a87b71fe7b9115219a02af64a7: Status 404 returned error can't find the container with id c534113bcf02f9b59164a2ae01eee5aaca0047a87b71fe7b9115219a02af64a7 Feb 27 16:46:06 crc kubenswrapper[4751]: I0227 16:46:06.979767 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-6gnd6"] Feb 27 16:46:07 crc kubenswrapper[4751]: I0227 16:46:07.007134 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-6gnd6" event={"ID":"6f256e1b-bd56-4dd3-a150-7660ab6d222f","Type":"ContainerStarted","Data":"c534113bcf02f9b59164a2ae01eee5aaca0047a87b71fe7b9115219a02af64a7"} Feb 27 16:46:07 crc kubenswrapper[4751]: I0227 16:46:07.015022 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536846-d7tbv" event={"ID":"9ff1d6b8-692b-4578-91a6-69cda33c3b50","Type":"ContainerDied","Data":"bc489d75e076edd01771dfd2ebf6e4126d4cb0374acbb7c46ac19ed7f24b5ef3"} Feb 27 16:46:07 crc kubenswrapper[4751]: I0227 16:46:07.015061 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bc489d75e076edd01771dfd2ebf6e4126d4cb0374acbb7c46ac19ed7f24b5ef3" Feb 27 16:46:07 crc kubenswrapper[4751]: I0227 16:46:07.015121 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536846-d7tbv" Feb 27 16:46:07 crc kubenswrapper[4751]: I0227 16:46:07.210947 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-aa96-account-create-update-d2dtn"] Feb 27 16:46:07 crc kubenswrapper[4751]: I0227 16:46:07.230274 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-0a1f-account-create-update-xg72f"] Feb 27 16:46:07 crc kubenswrapper[4751]: W0227 16:46:07.238359 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0958cc92_4dcb_4e10_b592_b3800bfe7a18.slice/crio-7a8a1d2b06d2eb0fe6050504e87abd6a682f65a88496800209c1f1d244f0234e WatchSource:0}: Error finding container 7a8a1d2b06d2eb0fe6050504e87abd6a682f65a88496800209c1f1d244f0234e: Status 404 returned error can't find the container with id 7a8a1d2b06d2eb0fe6050504e87abd6a682f65a88496800209c1f1d244f0234e Feb 27 16:46:07 crc kubenswrapper[4751]: I0227 16:46:07.305502 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-c2k4l"] Feb 27 16:46:07 crc kubenswrapper[4751]: W0227 16:46:07.319352 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88022d82_f750_458c_8f97_a34d3eaef634.slice/crio-a7bc0843db523bfbc616a0c1025dbb9bc27907d345a557e2568ad32a4437b438 WatchSource:0}: Error finding container a7bc0843db523bfbc616a0c1025dbb9bc27907d345a557e2568ad32a4437b438: Status 404 returned error can't find the container with id a7bc0843db523bfbc616a0c1025dbb9bc27907d345a557e2568ad32a4437b438 Feb 27 16:46:07 crc kubenswrapper[4751]: I0227 16:46:07.452537 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-infra/auto-csr-approver-29536840-gt6tq"] Feb 27 16:46:07 crc kubenswrapper[4751]: I0227 16:46:07.458252 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536840-gt6tq"] Feb 27 16:46:07 crc kubenswrapper[4751]: I0227 16:46:07.733765 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-698758b865-6px47" Feb 27 16:46:07 crc kubenswrapper[4751]: I0227 16:46:07.800803 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-vpgr9"] Feb 27 16:46:07 crc kubenswrapper[4751]: I0227 16:46:07.801069 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-vpgr9" podUID="4dd2754d-ed93-4494-ae20-b38295ba9fff" containerName="dnsmasq-dns" containerID="cri-o://ab41f5ccb0a4cf0b5b23f112245d7c026e3757eb63d5c8a9f13f9a431eed3bad" gracePeriod=10 Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.026267 4751 generic.go:334] "Generic (PLEG): container finished" podID="81347661-57c9-48d8-8b26-c6eddbfe887c" containerID="0ae2f7b56352c4a7cdff2127e705d4c179feca93ded8ed875ae44c12fa5c38ae" exitCode=0 Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.026347 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-aa96-account-create-update-d2dtn" event={"ID":"81347661-57c9-48d8-8b26-c6eddbfe887c","Type":"ContainerDied","Data":"0ae2f7b56352c4a7cdff2127e705d4c179feca93ded8ed875ae44c12fa5c38ae"} Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.026404 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-aa96-account-create-update-d2dtn" event={"ID":"81347661-57c9-48d8-8b26-c6eddbfe887c","Type":"ContainerStarted","Data":"3bd94e47185502f77dd74387dcaf3f6d9bd6d01e19507495d7e058fe4ecc2d38"} Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.028958 4751 generic.go:334] "Generic (PLEG): container finished" podID="88022d82-f750-458c-8f97-a34d3eaef634" containerID="090f901a8ba3dcff42ed05328e63600b87dc4236801a1d7eac200161a8114a4b" exitCode=0 Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.029014 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-c2k4l" event={"ID":"88022d82-f750-458c-8f97-a34d3eaef634","Type":"ContainerDied","Data":"090f901a8ba3dcff42ed05328e63600b87dc4236801a1d7eac200161a8114a4b"} Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.029044 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-c2k4l" event={"ID":"88022d82-f750-458c-8f97-a34d3eaef634","Type":"ContainerStarted","Data":"a7bc0843db523bfbc616a0c1025dbb9bc27907d345a557e2568ad32a4437b438"} Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.044111 4751 generic.go:334] "Generic (PLEG): container finished" podID="6f256e1b-bd56-4dd3-a150-7660ab6d222f" containerID="630f3c9c80ad82fb5029211f5ab0717c1e38f33af63ff70a36579e7512c954e1" exitCode=0 Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.044227 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-6gnd6" event={"ID":"6f256e1b-bd56-4dd3-a150-7660ab6d222f","Type":"ContainerDied","Data":"630f3c9c80ad82fb5029211f5ab0717c1e38f33af63ff70a36579e7512c954e1"} Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.053613 4751 generic.go:334] "Generic (PLEG): container finished" podID="4dd2754d-ed93-4494-ae20-b38295ba9fff" containerID="ab41f5ccb0a4cf0b5b23f112245d7c026e3757eb63d5c8a9f13f9a431eed3bad" exitCode=0 Feb 27 
16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.053701 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-vpgr9" event={"ID":"4dd2754d-ed93-4494-ae20-b38295ba9fff","Type":"ContainerDied","Data":"ab41f5ccb0a4cf0b5b23f112245d7c026e3757eb63d5c8a9f13f9a431eed3bad"} Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.054848 4751 generic.go:334] "Generic (PLEG): container finished" podID="0958cc92-4dcb-4e10-b592-b3800bfe7a18" containerID="3d08ca10ddfe45db584f322c992bf5201be412cd465225bf79f5f459fc3b794d" exitCode=0 Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.054874 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-0a1f-account-create-update-xg72f" event={"ID":"0958cc92-4dcb-4e10-b592-b3800bfe7a18","Type":"ContainerDied","Data":"3d08ca10ddfe45db584f322c992bf5201be412cd465225bf79f5f459fc3b794d"} Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.054888 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-0a1f-account-create-update-xg72f" event={"ID":"0958cc92-4dcb-4e10-b592-b3800bfe7a18","Type":"ContainerStarted","Data":"7a8a1d2b06d2eb0fe6050504e87abd6a682f65a88496800209c1f1d244f0234e"} Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.243523 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-vpgr9" Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.314189 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4dd2754d-ed93-4494-ae20-b38295ba9fff-config\") pod \"4dd2754d-ed93-4494-ae20-b38295ba9fff\" (UID: \"4dd2754d-ed93-4494-ae20-b38295ba9fff\") " Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.314269 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dxpk\" (UniqueName: \"kubernetes.io/projected/4dd2754d-ed93-4494-ae20-b38295ba9fff-kube-api-access-9dxpk\") pod \"4dd2754d-ed93-4494-ae20-b38295ba9fff\" (UID: \"4dd2754d-ed93-4494-ae20-b38295ba9fff\") " Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.314388 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4dd2754d-ed93-4494-ae20-b38295ba9fff-dns-svc\") pod \"4dd2754d-ed93-4494-ae20-b38295ba9fff\" (UID: \"4dd2754d-ed93-4494-ae20-b38295ba9fff\") " Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.319440 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4dd2754d-ed93-4494-ae20-b38295ba9fff-kube-api-access-9dxpk" (OuterVolumeSpecName: "kube-api-access-9dxpk") pod "4dd2754d-ed93-4494-ae20-b38295ba9fff" (UID: "4dd2754d-ed93-4494-ae20-b38295ba9fff"). InnerVolumeSpecName "kube-api-access-9dxpk". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.352989 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4dd2754d-ed93-4494-ae20-b38295ba9fff-config" (OuterVolumeSpecName: "config") pod "4dd2754d-ed93-4494-ae20-b38295ba9fff" (UID: "4dd2754d-ed93-4494-ae20-b38295ba9fff"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.370072 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4dd2754d-ed93-4494-ae20-b38295ba9fff-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4dd2754d-ed93-4494-ae20-b38295ba9fff" (UID: "4dd2754d-ed93-4494-ae20-b38295ba9fff"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.417768 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4dd2754d-ed93-4494-ae20-b38295ba9fff-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.418090 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9dxpk\" (UniqueName: \"kubernetes.io/projected/4dd2754d-ed93-4494-ae20-b38295ba9fff-kube-api-access-9dxpk\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.418102 4751 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4dd2754d-ed93-4494-ae20-b38295ba9fff-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.432139 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-7cx8p"] Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.438647 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-7cx8p"] Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.533625 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2844fba2-ba90-41a6-a4d6-201c6a982417" path="/var/lib/kubelet/pods/2844fba2-ba90-41a6-a4d6-201c6a982417/volumes" Feb 27 16:46:08 crc kubenswrapper[4751]: I0227 16:46:08.534816 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="95d5d54c-def0-4b9e-b678-e708dec2ecc5" path="/var/lib/kubelet/pods/95d5d54c-def0-4b9e-b678-e708dec2ecc5/volumes" Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.064748 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-vpgr9" Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.064798 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-vpgr9" event={"ID":"4dd2754d-ed93-4494-ae20-b38295ba9fff","Type":"ContainerDied","Data":"2cce647053ed8757960371c5031267cfd78c5b0564bdfae1078d84853e72c0db"} Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.064848 4751 scope.go:117] "RemoveContainer" containerID="ab41f5ccb0a4cf0b5b23f112245d7c026e3757eb63d5c8a9f13f9a431eed3bad" Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.066775 4751 generic.go:334] "Generic (PLEG): container finished" podID="51a81c6a-6814-412d-b77d-e741f1f74446" containerID="4c6716148a74ea8af28ec00f8d9776e6a9149b4724fe5543af6b7a72f9411e92" exitCode=0 Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.066840 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"51a81c6a-6814-412d-b77d-e741f1f74446","Type":"ContainerDied","Data":"4c6716148a74ea8af28ec00f8d9776e6a9149b4724fe5543af6b7a72f9411e92"} Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.069576 4751 generic.go:334] "Generic (PLEG): container finished" podID="cecf602c-dec2-40c6-922c-bf84b707b1b9" containerID="90d048165126f4b62e9010d52adea94a9bf9162b44553551cc7d28985890a0a2" exitCode=0 Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.069661 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"cecf602c-dec2-40c6-922c-bf84b707b1b9","Type":"ContainerDied","Data":"90d048165126f4b62e9010d52adea94a9bf9162b44553551cc7d28985890a0a2"} Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.088747 4751 scope.go:117] "RemoveContainer" containerID="544f50627f746ac00b0478377ae0f0bde6c9e30bdfd7fa475e9d51ee964262ad" Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.118835 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-vpgr9"] Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.146755 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-vpgr9"] Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.467588 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-6gnd6" Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.650829 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f256e1b-bd56-4dd3-a150-7660ab6d222f-operator-scripts\") pod \"6f256e1b-bd56-4dd3-a150-7660ab6d222f\" (UID: \"6f256e1b-bd56-4dd3-a150-7660ab6d222f\") " Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.651250 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l4wfb\" (UniqueName: \"kubernetes.io/projected/6f256e1b-bd56-4dd3-a150-7660ab6d222f-kube-api-access-l4wfb\") pod \"6f256e1b-bd56-4dd3-a150-7660ab6d222f\" (UID: \"6f256e1b-bd56-4dd3-a150-7660ab6d222f\") " Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.651244 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f256e1b-bd56-4dd3-a150-7660ab6d222f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6f256e1b-bd56-4dd3-a150-7660ab6d222f" (UID: "6f256e1b-bd56-4dd3-a150-7660ab6d222f"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.651978 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f256e1b-bd56-4dd3-a150-7660ab6d222f-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.653508 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-aa96-account-create-update-d2dtn" Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.656206 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f256e1b-bd56-4dd3-a150-7660ab6d222f-kube-api-access-l4wfb" (OuterVolumeSpecName: "kube-api-access-l4wfb") pod "6f256e1b-bd56-4dd3-a150-7660ab6d222f" (UID: "6f256e1b-bd56-4dd3-a150-7660ab6d222f"). InnerVolumeSpecName "kube-api-access-l4wfb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.659264 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-c2k4l" Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.664713 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-0a1f-account-create-update-xg72f" Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.753168 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81347661-57c9-48d8-8b26-c6eddbfe887c-operator-scripts\") pod \"81347661-57c9-48d8-8b26-c6eddbfe887c\" (UID: \"81347661-57c9-48d8-8b26-c6eddbfe887c\") " Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.753234 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88022d82-f750-458c-8f97-a34d3eaef634-operator-scripts\") pod \"88022d82-f750-458c-8f97-a34d3eaef634\" (UID: \"88022d82-f750-458c-8f97-a34d3eaef634\") " Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.753275 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8v2nd\" (UniqueName: \"kubernetes.io/projected/88022d82-f750-458c-8f97-a34d3eaef634-kube-api-access-8v2nd\") pod \"88022d82-f750-458c-8f97-a34d3eaef634\" (UID: \"88022d82-f750-458c-8f97-a34d3eaef634\") " Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.753296 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0958cc92-4dcb-4e10-b592-b3800bfe7a18-operator-scripts\") pod \"0958cc92-4dcb-4e10-b592-b3800bfe7a18\" (UID: \"0958cc92-4dcb-4e10-b592-b3800bfe7a18\") " Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.753360 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wpqxw\" (UniqueName: \"kubernetes.io/projected/81347661-57c9-48d8-8b26-c6eddbfe887c-kube-api-access-wpqxw\") pod \"81347661-57c9-48d8-8b26-c6eddbfe887c\" (UID: \"81347661-57c9-48d8-8b26-c6eddbfe887c\") " Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.753413 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w8s49\" (UniqueName: \"kubernetes.io/projected/0958cc92-4dcb-4e10-b592-b3800bfe7a18-kube-api-access-w8s49\") pod \"0958cc92-4dcb-4e10-b592-b3800bfe7a18\" (UID: 
\"0958cc92-4dcb-4e10-b592-b3800bfe7a18\") " Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.753645 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l4wfb\" (UniqueName: \"kubernetes.io/projected/6f256e1b-bd56-4dd3-a150-7660ab6d222f-kube-api-access-l4wfb\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.754330 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0958cc92-4dcb-4e10-b592-b3800bfe7a18-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0958cc92-4dcb-4e10-b592-b3800bfe7a18" (UID: "0958cc92-4dcb-4e10-b592-b3800bfe7a18"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.754524 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/88022d82-f750-458c-8f97-a34d3eaef634-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "88022d82-f750-458c-8f97-a34d3eaef634" (UID: "88022d82-f750-458c-8f97-a34d3eaef634"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.754639 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81347661-57c9-48d8-8b26-c6eddbfe887c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "81347661-57c9-48d8-8b26-c6eddbfe887c" (UID: "81347661-57c9-48d8-8b26-c6eddbfe887c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.757068 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88022d82-f750-458c-8f97-a34d3eaef634-kube-api-access-8v2nd" (OuterVolumeSpecName: "kube-api-access-8v2nd") pod "88022d82-f750-458c-8f97-a34d3eaef634" (UID: "88022d82-f750-458c-8f97-a34d3eaef634"). InnerVolumeSpecName "kube-api-access-8v2nd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.757863 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81347661-57c9-48d8-8b26-c6eddbfe887c-kube-api-access-wpqxw" (OuterVolumeSpecName: "kube-api-access-wpqxw") pod "81347661-57c9-48d8-8b26-c6eddbfe887c" (UID: "81347661-57c9-48d8-8b26-c6eddbfe887c"). InnerVolumeSpecName "kube-api-access-wpqxw". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.758088 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0958cc92-4dcb-4e10-b592-b3800bfe7a18-kube-api-access-w8s49" (OuterVolumeSpecName: "kube-api-access-w8s49") pod "0958cc92-4dcb-4e10-b592-b3800bfe7a18" (UID: "0958cc92-4dcb-4e10-b592-b3800bfe7a18"). InnerVolumeSpecName "kube-api-access-w8s49". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.854390 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wpqxw\" (UniqueName: \"kubernetes.io/projected/81347661-57c9-48d8-8b26-c6eddbfe887c-kube-api-access-wpqxw\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.854704 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w8s49\" (UniqueName: \"kubernetes.io/projected/0958cc92-4dcb-4e10-b592-b3800bfe7a18-kube-api-access-w8s49\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.854717 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81347661-57c9-48d8-8b26-c6eddbfe887c-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.854726 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88022d82-f750-458c-8f97-a34d3eaef634-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.854734 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8v2nd\" (UniqueName: \"kubernetes.io/projected/88022d82-f750-458c-8f97-a34d3eaef634-kube-api-access-8v2nd\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:09 crc kubenswrapper[4751]: I0227 16:46:09.854742 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0958cc92-4dcb-4e10-b592-b3800bfe7a18-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.080620 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-aa96-account-create-update-d2dtn" event={"ID":"81347661-57c9-48d8-8b26-c6eddbfe887c","Type":"ContainerDied","Data":"3bd94e47185502f77dd74387dcaf3f6d9bd6d01e19507495d7e058fe4ecc2d38"} Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.080674 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3bd94e47185502f77dd74387dcaf3f6d9bd6d01e19507495d7e058fe4ecc2d38" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.080635 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-aa96-account-create-update-d2dtn" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.083057 4751 generic.go:334] "Generic (PLEG): container finished" podID="10cb8075-6c76-438d-8ba7-cacfb6acd7fe" containerID="8be581f350c0a6c6b3215f93ef0de633555618e7b76dc24371e057e35a5fd5fa" exitCode=0 Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.083185 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-sz5bd" event={"ID":"10cb8075-6c76-438d-8ba7-cacfb6acd7fe","Type":"ContainerDied","Data":"8be581f350c0a6c6b3215f93ef0de633555618e7b76dc24371e057e35a5fd5fa"} Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.086010 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-c2k4l" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.086013 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-c2k4l" event={"ID":"88022d82-f750-458c-8f97-a34d3eaef634","Type":"ContainerDied","Data":"a7bc0843db523bfbc616a0c1025dbb9bc27907d345a557e2568ad32a4437b438"} Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.086536 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a7bc0843db523bfbc616a0c1025dbb9bc27907d345a557e2568ad32a4437b438" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.088033 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-6gnd6" event={"ID":"6f256e1b-bd56-4dd3-a150-7660ab6d222f","Type":"ContainerDied","Data":"c534113bcf02f9b59164a2ae01eee5aaca0047a87b71fe7b9115219a02af64a7"} Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.088072 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c534113bcf02f9b59164a2ae01eee5aaca0047a87b71fe7b9115219a02af64a7" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.088043 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-6gnd6" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.092325 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-0a1f-account-create-update-xg72f" event={"ID":"0958cc92-4dcb-4e10-b592-b3800bfe7a18","Type":"ContainerDied","Data":"7a8a1d2b06d2eb0fe6050504e87abd6a682f65a88496800209c1f1d244f0234e"} Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.092355 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7a8a1d2b06d2eb0fe6050504e87abd6a682f65a88496800209c1f1d244f0234e" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.092368 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-0a1f-account-create-update-xg72f" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.094874 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"51a81c6a-6814-412d-b77d-e741f1f74446","Type":"ContainerStarted","Data":"56a09490ed1f916c96c436deb77a88d652e14cee4afd925713481445d7f435ab"} Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.095068 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.096884 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"cecf602c-dec2-40c6-922c-bf84b707b1b9","Type":"ContainerStarted","Data":"549fd5c24da2dfcd4fa0ba0f62c30ff6278b4f64c3189582850edb5093bc8b67"} Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.097074 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.153049 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.579769357 podStartE2EDuration="1m1.153030889s" podCreationTimestamp="2026-02-27 16:45:09 +0000 UTC" firstStartedPulling="2026-02-27 16:45:11.867663665 +0000 UTC m=+1274.014678112" lastFinishedPulling="2026-02-27 16:45:35.440925197 +0000 UTC m=+1297.587939644" observedRunningTime="2026-02-27 16:46:10.149580387 +0000 UTC m=+1332.296594834" watchObservedRunningTime="2026-02-27 16:46:10.153030889 +0000 UTC m=+1332.300045336" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.181139 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.42404589 podStartE2EDuration="1m0.181122659s" podCreationTimestamp="2026-02-27 16:45:10 +0000 UTC" firstStartedPulling="2026-02-27 16:45:12.659184199 +0000 UTC m=+1274.806198646" lastFinishedPulling="2026-02-27 16:45:35.416260968 +0000 UTC m=+1297.563275415" observedRunningTime="2026-02-27 16:46:10.174252676 +0000 UTC m=+1332.321267123" watchObservedRunningTime="2026-02-27 16:46:10.181122659 +0000 UTC m=+1332.328137096" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.541281 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4dd2754d-ed93-4494-ae20-b38295ba9fff" path="/var/lib/kubelet/pods/4dd2754d-ed93-4494-ae20-b38295ba9fff/volumes" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.788994 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-2pj4h"] Feb 27 16:46:10 crc kubenswrapper[4751]: E0227 16:46:10.789392 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f256e1b-bd56-4dd3-a150-7660ab6d222f" containerName="mariadb-database-create" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.789431 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f256e1b-bd56-4dd3-a150-7660ab6d222f" containerName="mariadb-database-create" Feb 27 16:46:10 crc kubenswrapper[4751]: E0227 16:46:10.789454 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dd2754d-ed93-4494-ae20-b38295ba9fff" containerName="dnsmasq-dns" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.789462 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dd2754d-ed93-4494-ae20-b38295ba9fff" containerName="dnsmasq-dns" Feb 27 16:46:10 crc kubenswrapper[4751]: E0227 16:46:10.789480 4751 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88022d82-f750-458c-8f97-a34d3eaef634" containerName="mariadb-database-create" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.789489 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="88022d82-f750-458c-8f97-a34d3eaef634" containerName="mariadb-database-create" Feb 27 16:46:10 crc kubenswrapper[4751]: E0227 16:46:10.789505 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0958cc92-4dcb-4e10-b592-b3800bfe7a18" containerName="mariadb-account-create-update" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.789512 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="0958cc92-4dcb-4e10-b592-b3800bfe7a18" containerName="mariadb-account-create-update" Feb 27 16:46:10 crc kubenswrapper[4751]: E0227 16:46:10.789527 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dd2754d-ed93-4494-ae20-b38295ba9fff" containerName="init" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.789534 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dd2754d-ed93-4494-ae20-b38295ba9fff" containerName="init" Feb 27 16:46:10 crc kubenswrapper[4751]: E0227 16:46:10.789545 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81347661-57c9-48d8-8b26-c6eddbfe887c" containerName="mariadb-account-create-update" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.789552 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="81347661-57c9-48d8-8b26-c6eddbfe887c" containerName="mariadb-account-create-update" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.789737 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="81347661-57c9-48d8-8b26-c6eddbfe887c" containerName="mariadb-account-create-update" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.789759 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f256e1b-bd56-4dd3-a150-7660ab6d222f" containerName="mariadb-database-create" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.789773 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="4dd2754d-ed93-4494-ae20-b38295ba9fff" containerName="dnsmasq-dns" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.789782 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="0958cc92-4dcb-4e10-b592-b3800bfe7a18" containerName="mariadb-account-create-update" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.789793 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="88022d82-f750-458c-8f97-a34d3eaef634" containerName="mariadb-database-create" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.790402 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-2pj4h" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.795972 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-n4zl7" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.796643 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.802641 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-2pj4h"] Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.971426 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gfvz\" (UniqueName: \"kubernetes.io/projected/51f9db5a-ab58-4795-b09f-c2df5406c0cf-kube-api-access-7gfvz\") pod \"glance-db-sync-2pj4h\" (UID: \"51f9db5a-ab58-4795-b09f-c2df5406c0cf\") " pod="openstack/glance-db-sync-2pj4h" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.971484 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51f9db5a-ab58-4795-b09f-c2df5406c0cf-combined-ca-bundle\") pod \"glance-db-sync-2pj4h\" (UID: \"51f9db5a-ab58-4795-b09f-c2df5406c0cf\") " pod="openstack/glance-db-sync-2pj4h" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.971519 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51f9db5a-ab58-4795-b09f-c2df5406c0cf-config-data\") pod \"glance-db-sync-2pj4h\" (UID: \"51f9db5a-ab58-4795-b09f-c2df5406c0cf\") " pod="openstack/glance-db-sync-2pj4h" Feb 27 16:46:10 crc kubenswrapper[4751]: I0227 16:46:10.971611 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/51f9db5a-ab58-4795-b09f-c2df5406c0cf-db-sync-config-data\") pod \"glance-db-sync-2pj4h\" (UID: \"51f9db5a-ab58-4795-b09f-c2df5406c0cf\") " pod="openstack/glance-db-sync-2pj4h" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.073212 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/51f9db5a-ab58-4795-b09f-c2df5406c0cf-db-sync-config-data\") pod \"glance-db-sync-2pj4h\" (UID: \"51f9db5a-ab58-4795-b09f-c2df5406c0cf\") " pod="openstack/glance-db-sync-2pj4h" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.073280 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gfvz\" (UniqueName: \"kubernetes.io/projected/51f9db5a-ab58-4795-b09f-c2df5406c0cf-kube-api-access-7gfvz\") pod \"glance-db-sync-2pj4h\" (UID: \"51f9db5a-ab58-4795-b09f-c2df5406c0cf\") " pod="openstack/glance-db-sync-2pj4h" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.073312 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51f9db5a-ab58-4795-b09f-c2df5406c0cf-combined-ca-bundle\") pod \"glance-db-sync-2pj4h\" (UID: \"51f9db5a-ab58-4795-b09f-c2df5406c0cf\") " pod="openstack/glance-db-sync-2pj4h" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.073340 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51f9db5a-ab58-4795-b09f-c2df5406c0cf-config-data\") pod 
\"glance-db-sync-2pj4h\" (UID: \"51f9db5a-ab58-4795-b09f-c2df5406c0cf\") " pod="openstack/glance-db-sync-2pj4h" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.078560 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/51f9db5a-ab58-4795-b09f-c2df5406c0cf-db-sync-config-data\") pod \"glance-db-sync-2pj4h\" (UID: \"51f9db5a-ab58-4795-b09f-c2df5406c0cf\") " pod="openstack/glance-db-sync-2pj4h" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.078940 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51f9db5a-ab58-4795-b09f-c2df5406c0cf-combined-ca-bundle\") pod \"glance-db-sync-2pj4h\" (UID: \"51f9db5a-ab58-4795-b09f-c2df5406c0cf\") " pod="openstack/glance-db-sync-2pj4h" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.079952 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51f9db5a-ab58-4795-b09f-c2df5406c0cf-config-data\") pod \"glance-db-sync-2pj4h\" (UID: \"51f9db5a-ab58-4795-b09f-c2df5406c0cf\") " pod="openstack/glance-db-sync-2pj4h" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.101650 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gfvz\" (UniqueName: \"kubernetes.io/projected/51f9db5a-ab58-4795-b09f-c2df5406c0cf-kube-api-access-7gfvz\") pod \"glance-db-sync-2pj4h\" (UID: \"51f9db5a-ab58-4795-b09f-c2df5406c0cf\") " pod="openstack/glance-db-sync-2pj4h" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.110802 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-2pj4h" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.473717 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.581149 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-combined-ca-bundle\") pod \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.581308 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-ring-data-devices\") pod \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.581386 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-etc-swift\") pod \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.581454 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-dispersionconf\") pod \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.581490 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7rp5f\" (UniqueName: \"kubernetes.io/projected/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-kube-api-access-7rp5f\") pod \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.581529 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-swiftconf\") pod \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.581564 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-scripts\") pod \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\" (UID: \"10cb8075-6c76-438d-8ba7-cacfb6acd7fe\") " Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.582380 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "10cb8075-6c76-438d-8ba7-cacfb6acd7fe" (UID: "10cb8075-6c76-438d-8ba7-cacfb6acd7fe"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.582399 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "10cb8075-6c76-438d-8ba7-cacfb6acd7fe" (UID: "10cb8075-6c76-438d-8ba7-cacfb6acd7fe"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.592906 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-kube-api-access-7rp5f" (OuterVolumeSpecName: "kube-api-access-7rp5f") pod "10cb8075-6c76-438d-8ba7-cacfb6acd7fe" (UID: "10cb8075-6c76-438d-8ba7-cacfb6acd7fe"). InnerVolumeSpecName "kube-api-access-7rp5f". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.596660 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "10cb8075-6c76-438d-8ba7-cacfb6acd7fe" (UID: "10cb8075-6c76-438d-8ba7-cacfb6acd7fe"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.609535 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "10cb8075-6c76-438d-8ba7-cacfb6acd7fe" (UID: "10cb8075-6c76-438d-8ba7-cacfb6acd7fe"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.615594 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "10cb8075-6c76-438d-8ba7-cacfb6acd7fe" (UID: "10cb8075-6c76-438d-8ba7-cacfb6acd7fe"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.626341 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-scripts" (OuterVolumeSpecName: "scripts") pod "10cb8075-6c76-438d-8ba7-cacfb6acd7fe" (UID: "10cb8075-6c76-438d-8ba7-cacfb6acd7fe"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.686222 4751 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-dispersionconf\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.686254 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7rp5f\" (UniqueName: \"kubernetes.io/projected/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-kube-api-access-7rp5f\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.686264 4751 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-swiftconf\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.686273 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.686283 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.686292 4751 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-ring-data-devices\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.686301 4751 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/10cb8075-6c76-438d-8ba7-cacfb6acd7fe-etc-swift\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:11 crc kubenswrapper[4751]: I0227 16:46:11.690618 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-2pj4h"] Feb 27 16:46:11 crc kubenswrapper[4751]: W0227 16:46:11.699454 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51f9db5a_ab58_4795_b09f_c2df5406c0cf.slice/crio-d9f20201fc66c5e1178a792e1fe60392f6b35aff35c7e35455e08a1734427403 WatchSource:0}: Error finding container d9f20201fc66c5e1178a792e1fe60392f6b35aff35c7e35455e08a1734427403: Status 404 returned error can't find the container with id d9f20201fc66c5e1178a792e1fe60392f6b35aff35c7e35455e08a1734427403 Feb 27 16:46:12 crc kubenswrapper[4751]: I0227 16:46:12.162623 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-2pj4h" event={"ID":"51f9db5a-ab58-4795-b09f-c2df5406c0cf","Type":"ContainerStarted","Data":"d9f20201fc66c5e1178a792e1fe60392f6b35aff35c7e35455e08a1734427403"} Feb 27 16:46:12 crc kubenswrapper[4751]: I0227 16:46:12.165097 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-sz5bd" event={"ID":"10cb8075-6c76-438d-8ba7-cacfb6acd7fe","Type":"ContainerDied","Data":"2f647e01f24758f35870a8526b0bd45e64e81b0d6ff029967b195ca0a50b95e4"} Feb 27 16:46:12 crc kubenswrapper[4751]: I0227 16:46:12.165163 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2f647e01f24758f35870a8526b0bd45e64e81b0d6ff029967b195ca0a50b95e4" Feb 27 16:46:12 crc kubenswrapper[4751]: I0227 16:46:12.165223 4751 util.go:48] "No ready sandbox for pod 
can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-sz5bd" Feb 27 16:46:12 crc kubenswrapper[4751]: I0227 16:46:12.669134 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Feb 27 16:46:13 crc kubenswrapper[4751]: I0227 16:46:13.443374 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-8q4zm"] Feb 27 16:46:13 crc kubenswrapper[4751]: E0227 16:46:13.443941 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10cb8075-6c76-438d-8ba7-cacfb6acd7fe" containerName="swift-ring-rebalance" Feb 27 16:46:13 crc kubenswrapper[4751]: I0227 16:46:13.443954 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="10cb8075-6c76-438d-8ba7-cacfb6acd7fe" containerName="swift-ring-rebalance" Feb 27 16:46:13 crc kubenswrapper[4751]: I0227 16:46:13.444118 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="10cb8075-6c76-438d-8ba7-cacfb6acd7fe" containerName="swift-ring-rebalance" Feb 27 16:46:13 crc kubenswrapper[4751]: I0227 16:46:13.447257 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-8q4zm" Feb 27 16:46:13 crc kubenswrapper[4751]: I0227 16:46:13.450078 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Feb 27 16:46:13 crc kubenswrapper[4751]: I0227 16:46:13.454523 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-8q4zm"] Feb 27 16:46:13 crc kubenswrapper[4751]: I0227 16:46:13.620290 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ec80714-54dc-4207-9b0e-0eb76833d496-operator-scripts\") pod \"root-account-create-update-8q4zm\" (UID: \"8ec80714-54dc-4207-9b0e-0eb76833d496\") " pod="openstack/root-account-create-update-8q4zm" Feb 27 16:46:13 crc kubenswrapper[4751]: I0227 16:46:13.620752 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8b8rs\" (UniqueName: \"kubernetes.io/projected/8ec80714-54dc-4207-9b0e-0eb76833d496-kube-api-access-8b8rs\") pod \"root-account-create-update-8q4zm\" (UID: \"8ec80714-54dc-4207-9b0e-0eb76833d496\") " pod="openstack/root-account-create-update-8q4zm" Feb 27 16:46:13 crc kubenswrapper[4751]: I0227 16:46:13.722195 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ec80714-54dc-4207-9b0e-0eb76833d496-operator-scripts\") pod \"root-account-create-update-8q4zm\" (UID: \"8ec80714-54dc-4207-9b0e-0eb76833d496\") " pod="openstack/root-account-create-update-8q4zm" Feb 27 16:46:13 crc kubenswrapper[4751]: I0227 16:46:13.722278 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8b8rs\" (UniqueName: \"kubernetes.io/projected/8ec80714-54dc-4207-9b0e-0eb76833d496-kube-api-access-8b8rs\") pod \"root-account-create-update-8q4zm\" (UID: \"8ec80714-54dc-4207-9b0e-0eb76833d496\") " pod="openstack/root-account-create-update-8q4zm" Feb 27 16:46:13 crc kubenswrapper[4751]: I0227 16:46:13.723919 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ec80714-54dc-4207-9b0e-0eb76833d496-operator-scripts\") pod \"root-account-create-update-8q4zm\" (UID: 
\"8ec80714-54dc-4207-9b0e-0eb76833d496\") " pod="openstack/root-account-create-update-8q4zm" Feb 27 16:46:13 crc kubenswrapper[4751]: I0227 16:46:13.759661 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8b8rs\" (UniqueName: \"kubernetes.io/projected/8ec80714-54dc-4207-9b0e-0eb76833d496-kube-api-access-8b8rs\") pod \"root-account-create-update-8q4zm\" (UID: \"8ec80714-54dc-4207-9b0e-0eb76833d496\") " pod="openstack/root-account-create-update-8q4zm" Feb 27 16:46:13 crc kubenswrapper[4751]: I0227 16:46:13.767349 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-8q4zm" Feb 27 16:46:14 crc kubenswrapper[4751]: I0227 16:46:14.246691 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-8q4zm"] Feb 27 16:46:14 crc kubenswrapper[4751]: I0227 16:46:14.441196 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-etc-swift\") pod \"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " pod="openstack/swift-storage-0" Feb 27 16:46:14 crc kubenswrapper[4751]: I0227 16:46:14.449427 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-etc-swift\") pod \"swift-storage-0\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " pod="openstack/swift-storage-0" Feb 27 16:46:14 crc kubenswrapper[4751]: I0227 16:46:14.720174 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Feb 27 16:46:15 crc kubenswrapper[4751]: I0227 16:46:15.193064 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-8q4zm" event={"ID":"8ec80714-54dc-4207-9b0e-0eb76833d496","Type":"ContainerStarted","Data":"2d6f2c2132d520c0abbee94ca4519ac8c9598c3c76759b4cf4e798ad58b336ff"} Feb 27 16:46:15 crc kubenswrapper[4751]: I0227 16:46:15.460541 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Feb 27 16:46:15 crc kubenswrapper[4751]: W0227 16:46:15.464140 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-9bf4ad609796095aa35d0e2cb9fc5024dce46f642c5dc4c0ecdf09965dcd566f WatchSource:0}: Error finding container 9bf4ad609796095aa35d0e2cb9fc5024dce46f642c5dc4c0ecdf09965dcd566f: Status 404 returned error can't find the container with id 9bf4ad609796095aa35d0e2cb9fc5024dce46f642c5dc4c0ecdf09965dcd566f Feb 27 16:46:16 crc kubenswrapper[4751]: I0227 16:46:16.206371 4751 generic.go:334] "Generic (PLEG): container finished" podID="8ec80714-54dc-4207-9b0e-0eb76833d496" containerID="6a73879796cab2d47e005fea1c9dbd41c8bda0273883d4f118c4924ab89c54e2" exitCode=0 Feb 27 16:46:16 crc kubenswrapper[4751]: I0227 16:46:16.206565 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-8q4zm" event={"ID":"8ec80714-54dc-4207-9b0e-0eb76833d496","Type":"ContainerDied","Data":"6a73879796cab2d47e005fea1c9dbd41c8bda0273883d4f118c4924ab89c54e2"} Feb 27 16:46:16 crc kubenswrapper[4751]: I0227 16:46:16.210071 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerStarted","Data":"9bf4ad609796095aa35d0e2cb9fc5024dce46f642c5dc4c0ecdf09965dcd566f"} Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.120683 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-gdjfm" podUID="3f29e0f7-8556-4570-a115-1d1ee089479c" containerName="ovn-controller" probeResult="failure" output=< Feb 27 16:46:17 crc kubenswrapper[4751]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Feb 27 16:46:17 crc kubenswrapper[4751]: > Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.168181 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.174728 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.394321 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-gdjfm-config-9xn59"] Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.398608 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-gdjfm-config-9xn59" Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.403729 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.404389 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-gdjfm-config-9xn59"] Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.499130 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fe47550f-abfb-4941-bd29-fceb5d4074bb-scripts\") pod \"ovn-controller-gdjfm-config-9xn59\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " pod="openstack/ovn-controller-gdjfm-config-9xn59" Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.499718 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fe47550f-abfb-4941-bd29-fceb5d4074bb-var-run-ovn\") pod \"ovn-controller-gdjfm-config-9xn59\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " pod="openstack/ovn-controller-gdjfm-config-9xn59" Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.499799 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/fe47550f-abfb-4941-bd29-fceb5d4074bb-additional-scripts\") pod \"ovn-controller-gdjfm-config-9xn59\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " pod="openstack/ovn-controller-gdjfm-config-9xn59" Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.499843 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jpdrt\" (UniqueName: \"kubernetes.io/projected/fe47550f-abfb-4941-bd29-fceb5d4074bb-kube-api-access-jpdrt\") pod \"ovn-controller-gdjfm-config-9xn59\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " pod="openstack/ovn-controller-gdjfm-config-9xn59" Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.499949 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: 
\"kubernetes.io/host-path/fe47550f-abfb-4941-bd29-fceb5d4074bb-var-log-ovn\") pod \"ovn-controller-gdjfm-config-9xn59\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " pod="openstack/ovn-controller-gdjfm-config-9xn59" Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.500042 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fe47550f-abfb-4941-bd29-fceb5d4074bb-var-run\") pod \"ovn-controller-gdjfm-config-9xn59\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " pod="openstack/ovn-controller-gdjfm-config-9xn59" Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.603206 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fe47550f-abfb-4941-bd29-fceb5d4074bb-var-run-ovn\") pod \"ovn-controller-gdjfm-config-9xn59\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " pod="openstack/ovn-controller-gdjfm-config-9xn59" Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.603701 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fe47550f-abfb-4941-bd29-fceb5d4074bb-var-run-ovn\") pod \"ovn-controller-gdjfm-config-9xn59\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " pod="openstack/ovn-controller-gdjfm-config-9xn59" Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.603926 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/fe47550f-abfb-4941-bd29-fceb5d4074bb-additional-scripts\") pod \"ovn-controller-gdjfm-config-9xn59\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " pod="openstack/ovn-controller-gdjfm-config-9xn59" Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.604032 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jpdrt\" (UniqueName: \"kubernetes.io/projected/fe47550f-abfb-4941-bd29-fceb5d4074bb-kube-api-access-jpdrt\") pod \"ovn-controller-gdjfm-config-9xn59\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " pod="openstack/ovn-controller-gdjfm-config-9xn59" Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.604170 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fe47550f-abfb-4941-bd29-fceb5d4074bb-var-log-ovn\") pod \"ovn-controller-gdjfm-config-9xn59\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " pod="openstack/ovn-controller-gdjfm-config-9xn59" Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.604280 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fe47550f-abfb-4941-bd29-fceb5d4074bb-var-log-ovn\") pod \"ovn-controller-gdjfm-config-9xn59\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " pod="openstack/ovn-controller-gdjfm-config-9xn59" Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.604311 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fe47550f-abfb-4941-bd29-fceb5d4074bb-var-run\") pod \"ovn-controller-gdjfm-config-9xn59\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " pod="openstack/ovn-controller-gdjfm-config-9xn59" Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.604378 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: 
\"kubernetes.io/host-path/fe47550f-abfb-4941-bd29-fceb5d4074bb-var-run\") pod \"ovn-controller-gdjfm-config-9xn59\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " pod="openstack/ovn-controller-gdjfm-config-9xn59" Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.604472 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fe47550f-abfb-4941-bd29-fceb5d4074bb-scripts\") pod \"ovn-controller-gdjfm-config-9xn59\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " pod="openstack/ovn-controller-gdjfm-config-9xn59" Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.605987 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/fe47550f-abfb-4941-bd29-fceb5d4074bb-additional-scripts\") pod \"ovn-controller-gdjfm-config-9xn59\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " pod="openstack/ovn-controller-gdjfm-config-9xn59" Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.607381 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fe47550f-abfb-4941-bd29-fceb5d4074bb-scripts\") pod \"ovn-controller-gdjfm-config-9xn59\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " pod="openstack/ovn-controller-gdjfm-config-9xn59" Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.642429 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jpdrt\" (UniqueName: \"kubernetes.io/projected/fe47550f-abfb-4941-bd29-fceb5d4074bb-kube-api-access-jpdrt\") pod \"ovn-controller-gdjfm-config-9xn59\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " pod="openstack/ovn-controller-gdjfm-config-9xn59" Feb 27 16:46:17 crc kubenswrapper[4751]: I0227 16:46:17.741832 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-gdjfm-config-9xn59" Feb 27 16:46:21 crc kubenswrapper[4751]: I0227 16:46:21.360700 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="51a81c6a-6814-412d-b77d-e741f1f74446" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.105:5671: connect: connection refused" Feb 27 16:46:22 crc kubenswrapper[4751]: I0227 16:46:22.118384 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-gdjfm" podUID="3f29e0f7-8556-4570-a115-1d1ee089479c" containerName="ovn-controller" probeResult="failure" output=< Feb 27 16:46:22 crc kubenswrapper[4751]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Feb 27 16:46:22 crc kubenswrapper[4751]: > Feb 27 16:46:22 crc kubenswrapper[4751]: I0227 16:46:22.141699 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="cecf602c-dec2-40c6-922c-bf84b707b1b9" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.106:5671: connect: connection refused" Feb 27 16:46:25 crc kubenswrapper[4751]: I0227 16:46:25.671206 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-8q4zm" Feb 27 16:46:25 crc kubenswrapper[4751]: I0227 16:46:25.844492 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8b8rs\" (UniqueName: \"kubernetes.io/projected/8ec80714-54dc-4207-9b0e-0eb76833d496-kube-api-access-8b8rs\") pod \"8ec80714-54dc-4207-9b0e-0eb76833d496\" (UID: \"8ec80714-54dc-4207-9b0e-0eb76833d496\") " Feb 27 16:46:25 crc kubenswrapper[4751]: I0227 16:46:25.844607 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ec80714-54dc-4207-9b0e-0eb76833d496-operator-scripts\") pod \"8ec80714-54dc-4207-9b0e-0eb76833d496\" (UID: \"8ec80714-54dc-4207-9b0e-0eb76833d496\") " Feb 27 16:46:25 crc kubenswrapper[4751]: I0227 16:46:25.845382 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ec80714-54dc-4207-9b0e-0eb76833d496-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8ec80714-54dc-4207-9b0e-0eb76833d496" (UID: "8ec80714-54dc-4207-9b0e-0eb76833d496"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:25 crc kubenswrapper[4751]: I0227 16:46:25.853991 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ec80714-54dc-4207-9b0e-0eb76833d496-kube-api-access-8b8rs" (OuterVolumeSpecName: "kube-api-access-8b8rs") pod "8ec80714-54dc-4207-9b0e-0eb76833d496" (UID: "8ec80714-54dc-4207-9b0e-0eb76833d496"). InnerVolumeSpecName "kube-api-access-8b8rs". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:46:25 crc kubenswrapper[4751]: I0227 16:46:25.947778 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8b8rs\" (UniqueName: \"kubernetes.io/projected/8ec80714-54dc-4207-9b0e-0eb76833d496-kube-api-access-8b8rs\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:25 crc kubenswrapper[4751]: I0227 16:46:25.947910 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ec80714-54dc-4207-9b0e-0eb76833d496-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:26 crc kubenswrapper[4751]: I0227 16:46:26.021643 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-gdjfm-config-9xn59"] Feb 27 16:46:26 crc kubenswrapper[4751]: W0227 16:46:26.034644 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfe47550f_abfb_4941_bd29_fceb5d4074bb.slice/crio-ee6374aa5ffbe0608a66ee0e176255eebbeeb291da9ae41dc767a9140e1d1d38 WatchSource:0}: Error finding container ee6374aa5ffbe0608a66ee0e176255eebbeeb291da9ae41dc767a9140e1d1d38: Status 404 returned error can't find the container with id ee6374aa5ffbe0608a66ee0e176255eebbeeb291da9ae41dc767a9140e1d1d38 Feb 27 16:46:26 crc kubenswrapper[4751]: I0227 16:46:26.325519 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-gdjfm-config-9xn59" event={"ID":"fe47550f-abfb-4941-bd29-fceb5d4074bb","Type":"ContainerStarted","Data":"ee6374aa5ffbe0608a66ee0e176255eebbeeb291da9ae41dc767a9140e1d1d38"} Feb 27 16:46:26 crc kubenswrapper[4751]: I0227 16:46:26.328756 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-8q4zm" 
event={"ID":"8ec80714-54dc-4207-9b0e-0eb76833d496","Type":"ContainerDied","Data":"2d6f2c2132d520c0abbee94ca4519ac8c9598c3c76759b4cf4e798ad58b336ff"} Feb 27 16:46:26 crc kubenswrapper[4751]: I0227 16:46:26.328784 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2d6f2c2132d520c0abbee94ca4519ac8c9598c3c76759b4cf4e798ad58b336ff" Feb 27 16:46:26 crc kubenswrapper[4751]: I0227 16:46:26.328939 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-8q4zm" Feb 27 16:46:27 crc kubenswrapper[4751]: I0227 16:46:27.123868 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-gdjfm" podUID="3f29e0f7-8556-4570-a115-1d1ee089479c" containerName="ovn-controller" probeResult="failure" output=< Feb 27 16:46:27 crc kubenswrapper[4751]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Feb 27 16:46:27 crc kubenswrapper[4751]: > Feb 27 16:46:28 crc kubenswrapper[4751]: I0227 16:46:28.918802 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 16:46:28 crc kubenswrapper[4751]: I0227 16:46:28.918903 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:46:28 crc kubenswrapper[4751]: I0227 16:46:28.918972 4751 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 16:46:28 crc kubenswrapper[4751]: I0227 16:46:28.920048 4751 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ef7e9a78c9c006f209ebb578b8c3e17b655897835e4a3ab4f6e482b486441566"} pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 27 16:46:28 crc kubenswrapper[4751]: I0227 16:46:28.920152 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" containerID="cri-o://ef7e9a78c9c006f209ebb578b8c3e17b655897835e4a3ab4f6e482b486441566" gracePeriod=600 Feb 27 16:46:29 crc kubenswrapper[4751]: I0227 16:46:29.359359 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-gdjfm-config-9xn59" event={"ID":"fe47550f-abfb-4941-bd29-fceb5d4074bb","Type":"ContainerStarted","Data":"d07078ead15e772dd400fc2fab10411c832674de9a150659d237e367c26a856a"} Feb 27 16:46:29 crc kubenswrapper[4751]: I0227 16:46:29.362373 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerStarted","Data":"3d28978244e9cafd7b1ee3ade5f195d2ae28102706cdb9083ebd620acc9c5453"} Feb 27 16:46:29 crc kubenswrapper[4751]: E0227 16:46:29.498282 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled 
desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified" Feb 27 16:46:29 crc kubenswrapper[4751]: E0227 16:46:29.498515 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7gfvz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-2pj4h_openstack(51f9db5a-ab58-4795-b09f-c2df5406c0cf): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 27 16:46:29 crc kubenswrapper[4751]: E0227 16:46:29.499850 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-2pj4h" podUID="51f9db5a-ab58-4795-b09f-c2df5406c0cf" Feb 27 16:46:30 crc kubenswrapper[4751]: I0227 16:46:30.376050 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerStarted","Data":"3781fd0007798258c29970e2eca3675df69d6304df681043d13fc310c53b5b2d"} Feb 27 16:46:30 crc kubenswrapper[4751]: I0227 16:46:30.376826 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerStarted","Data":"a303cad8ff9d037361da17d1a15f7f3b922522d9c253a7d490bbde1a81132839"} Feb 27 16:46:30 crc kubenswrapper[4751]: I0227 16:46:30.376840 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerStarted","Data":"4cafaf593ca2edd1eaf7ce55b2075b944bb67896f93d4fa2ddbe908cbb542c69"} Feb 27 16:46:30 crc kubenswrapper[4751]: I0227 16:46:30.380440 4751 generic.go:334] "Generic (PLEG): container finished" podID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerID="ef7e9a78c9c006f209ebb578b8c3e17b655897835e4a3ab4f6e482b486441566" exitCode=0 Feb 27 16:46:30 crc kubenswrapper[4751]: I0227 16:46:30.380520 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerDied","Data":"ef7e9a78c9c006f209ebb578b8c3e17b655897835e4a3ab4f6e482b486441566"} Feb 27 16:46:30 crc kubenswrapper[4751]: I0227 16:46:30.380547 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerStarted","Data":"62d29c4bd042871716a930e4ba973dc2a54787adada169c002b5efb7ee6d0c17"} Feb 27 16:46:30 crc kubenswrapper[4751]: I0227 16:46:30.380564 4751 scope.go:117] "RemoveContainer" containerID="9018816dbd90d84dbf45956d038f614eb1f6863111903b50bc2958c2e12ef97b" Feb 27 16:46:30 crc kubenswrapper[4751]: I0227 16:46:30.383244 4751 generic.go:334] "Generic (PLEG): container finished" podID="fe47550f-abfb-4941-bd29-fceb5d4074bb" containerID="d07078ead15e772dd400fc2fab10411c832674de9a150659d237e367c26a856a" exitCode=0 Feb 27 16:46:30 crc kubenswrapper[4751]: I0227 16:46:30.383300 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-gdjfm-config-9xn59" event={"ID":"fe47550f-abfb-4941-bd29-fceb5d4074bb","Type":"ContainerDied","Data":"d07078ead15e772dd400fc2fab10411c832674de9a150659d237e367c26a856a"} Feb 27 16:46:30 crc kubenswrapper[4751]: E0227 16:46:30.386322 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-2pj4h" podUID="51f9db5a-ab58-4795-b09f-c2df5406c0cf" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.358735 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.735246 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-gw6w6"] Feb 27 16:46:31 crc kubenswrapper[4751]: E0227 16:46:31.736363 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ec80714-54dc-4207-9b0e-0eb76833d496" containerName="mariadb-account-create-update" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.736383 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ec80714-54dc-4207-9b0e-0eb76833d496" containerName="mariadb-account-create-update" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.736641 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ec80714-54dc-4207-9b0e-0eb76833d496" containerName="mariadb-account-create-update" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.737457 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-gw6w6" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.767211 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-gw6w6"] Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.834320 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-gdjfm-config-9xn59" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.874797 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8s58\" (UniqueName: \"kubernetes.io/projected/c696c27b-af62-4855-8694-1e541307c4f5-kube-api-access-l8s58\") pod \"cinder-db-create-gw6w6\" (UID: \"c696c27b-af62-4855-8694-1e541307c4f5\") " pod="openstack/cinder-db-create-gw6w6" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.875254 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c696c27b-af62-4855-8694-1e541307c4f5-operator-scripts\") pod \"cinder-db-create-gw6w6\" (UID: \"c696c27b-af62-4855-8694-1e541307c4f5\") " pod="openstack/cinder-db-create-gw6w6" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.897797 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-f6xg8"] Feb 27 16:46:31 crc kubenswrapper[4751]: E0227 16:46:31.898119 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe47550f-abfb-4941-bd29-fceb5d4074bb" containerName="ovn-config" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.898138 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe47550f-abfb-4941-bd29-fceb5d4074bb" containerName="ovn-config" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.898318 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe47550f-abfb-4941-bd29-fceb5d4074bb" containerName="ovn-config" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.898824 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-f6xg8" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.923348 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-f47b-account-create-update-nwbj8"] Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.924583 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-f47b-account-create-update-nwbj8" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.926911 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.946125 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-f6xg8"] Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.951640 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-f47b-account-create-update-nwbj8"] Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.976219 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fe47550f-abfb-4941-bd29-fceb5d4074bb-var-run-ovn\") pod \"fe47550f-abfb-4941-bd29-fceb5d4074bb\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.976258 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fe47550f-abfb-4941-bd29-fceb5d4074bb-var-log-ovn\") pod \"fe47550f-abfb-4941-bd29-fceb5d4074bb\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.976291 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jpdrt\" (UniqueName: \"kubernetes.io/projected/fe47550f-abfb-4941-bd29-fceb5d4074bb-kube-api-access-jpdrt\") pod \"fe47550f-abfb-4941-bd29-fceb5d4074bb\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.976322 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fe47550f-abfb-4941-bd29-fceb5d4074bb-var-run\") pod \"fe47550f-abfb-4941-bd29-fceb5d4074bb\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.976354 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fe47550f-abfb-4941-bd29-fceb5d4074bb-scripts\") pod \"fe47550f-abfb-4941-bd29-fceb5d4074bb\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.976548 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/fe47550f-abfb-4941-bd29-fceb5d4074bb-additional-scripts\") pod \"fe47550f-abfb-4941-bd29-fceb5d4074bb\" (UID: \"fe47550f-abfb-4941-bd29-fceb5d4074bb\") " Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.976770 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fe47550f-abfb-4941-bd29-fceb5d4074bb-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "fe47550f-abfb-4941-bd29-fceb5d4074bb" (UID: "fe47550f-abfb-4941-bd29-fceb5d4074bb"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.976796 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fz7m4\" (UniqueName: \"kubernetes.io/projected/39d227f2-e298-4b47-892b-a9a58e73b3d0-kube-api-access-fz7m4\") pod \"barbican-f47b-account-create-update-nwbj8\" (UID: \"39d227f2-e298-4b47-892b-a9a58e73b3d0\") " pod="openstack/barbican-f47b-account-create-update-nwbj8" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.976834 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c696c27b-af62-4855-8694-1e541307c4f5-operator-scripts\") pod \"cinder-db-create-gw6w6\" (UID: \"c696c27b-af62-4855-8694-1e541307c4f5\") " pod="openstack/cinder-db-create-gw6w6" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.976862 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2llsq\" (UniqueName: \"kubernetes.io/projected/fc32e789-24cd-4056-ae5d-a52e12c03df1-kube-api-access-2llsq\") pod \"barbican-db-create-f6xg8\" (UID: \"fc32e789-24cd-4056-ae5d-a52e12c03df1\") " pod="openstack/barbican-db-create-f6xg8" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.976770 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fe47550f-abfb-4941-bd29-fceb5d4074bb-var-run" (OuterVolumeSpecName: "var-run") pod "fe47550f-abfb-4941-bd29-fceb5d4074bb" (UID: "fe47550f-abfb-4941-bd29-fceb5d4074bb"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.976901 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39d227f2-e298-4b47-892b-a9a58e73b3d0-operator-scripts\") pod \"barbican-f47b-account-create-update-nwbj8\" (UID: \"39d227f2-e298-4b47-892b-a9a58e73b3d0\") " pod="openstack/barbican-f47b-account-create-update-nwbj8" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.976829 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fe47550f-abfb-4941-bd29-fceb5d4074bb-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "fe47550f-abfb-4941-bd29-fceb5d4074bb" (UID: "fe47550f-abfb-4941-bd29-fceb5d4074bb"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.976926 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8s58\" (UniqueName: \"kubernetes.io/projected/c696c27b-af62-4855-8694-1e541307c4f5-kube-api-access-l8s58\") pod \"cinder-db-create-gw6w6\" (UID: \"c696c27b-af62-4855-8694-1e541307c4f5\") " pod="openstack/cinder-db-create-gw6w6" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.976982 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc32e789-24cd-4056-ae5d-a52e12c03df1-operator-scripts\") pod \"barbican-db-create-f6xg8\" (UID: \"fc32e789-24cd-4056-ae5d-a52e12c03df1\") " pod="openstack/barbican-db-create-f6xg8" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.977029 4751 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fe47550f-abfb-4941-bd29-fceb5d4074bb-var-run-ovn\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.977040 4751 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fe47550f-abfb-4941-bd29-fceb5d4074bb-var-log-ovn\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.977049 4751 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fe47550f-abfb-4941-bd29-fceb5d4074bb-var-run\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.977559 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe47550f-abfb-4941-bd29-fceb5d4074bb-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "fe47550f-abfb-4941-bd29-fceb5d4074bb" (UID: "fe47550f-abfb-4941-bd29-fceb5d4074bb"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.977662 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c696c27b-af62-4855-8694-1e541307c4f5-operator-scripts\") pod \"cinder-db-create-gw6w6\" (UID: \"c696c27b-af62-4855-8694-1e541307c4f5\") " pod="openstack/cinder-db-create-gw6w6" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.978009 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe47550f-abfb-4941-bd29-fceb5d4074bb-scripts" (OuterVolumeSpecName: "scripts") pod "fe47550f-abfb-4941-bd29-fceb5d4074bb" (UID: "fe47550f-abfb-4941-bd29-fceb5d4074bb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.978280 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-xxmvj"] Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.979310 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-xxmvj" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.983247 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.983695 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.983838 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-hvwps" Feb 27 16:46:31 crc kubenswrapper[4751]: I0227 16:46:31.983947 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.004810 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe47550f-abfb-4941-bd29-fceb5d4074bb-kube-api-access-jpdrt" (OuterVolumeSpecName: "kube-api-access-jpdrt") pod "fe47550f-abfb-4941-bd29-fceb5d4074bb" (UID: "fe47550f-abfb-4941-bd29-fceb5d4074bb"). InnerVolumeSpecName "kube-api-access-jpdrt". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.008642 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8s58\" (UniqueName: \"kubernetes.io/projected/c696c27b-af62-4855-8694-1e541307c4f5-kube-api-access-l8s58\") pod \"cinder-db-create-gw6w6\" (UID: \"c696c27b-af62-4855-8694-1e541307c4f5\") " pod="openstack/cinder-db-create-gw6w6" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.028272 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-xxmvj"] Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.038369 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-ng9tv"] Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.043195 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-ng9tv" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.045039 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-d758-account-create-update-bdsrf"] Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.046793 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-d758-account-create-update-bdsrf" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.052934 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.062053 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-ng9tv"] Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.073564 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-d758-account-create-update-bdsrf"] Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.079594 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39d227f2-e298-4b47-892b-a9a58e73b3d0-operator-scripts\") pod \"barbican-f47b-account-create-update-nwbj8\" (UID: \"39d227f2-e298-4b47-892b-a9a58e73b3d0\") " pod="openstack/barbican-f47b-account-create-update-nwbj8" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.079690 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc32e789-24cd-4056-ae5d-a52e12c03df1-operator-scripts\") pod \"barbican-db-create-f6xg8\" (UID: \"fc32e789-24cd-4056-ae5d-a52e12c03df1\") " pod="openstack/barbican-db-create-f6xg8" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.079738 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3478d8a7-c396-4368-81eb-00d79c45c5b7-config-data\") pod \"keystone-db-sync-xxmvj\" (UID: \"3478d8a7-c396-4368-81eb-00d79c45c5b7\") " pod="openstack/keystone-db-sync-xxmvj" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.079785 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fz7m4\" (UniqueName: \"kubernetes.io/projected/39d227f2-e298-4b47-892b-a9a58e73b3d0-kube-api-access-fz7m4\") pod \"barbican-f47b-account-create-update-nwbj8\" (UID: \"39d227f2-e298-4b47-892b-a9a58e73b3d0\") " pod="openstack/barbican-f47b-account-create-update-nwbj8" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.079807 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3478d8a7-c396-4368-81eb-00d79c45c5b7-combined-ca-bundle\") pod \"keystone-db-sync-xxmvj\" (UID: \"3478d8a7-c396-4368-81eb-00d79c45c5b7\") " pod="openstack/keystone-db-sync-xxmvj" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.079844 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2llsq\" (UniqueName: \"kubernetes.io/projected/fc32e789-24cd-4056-ae5d-a52e12c03df1-kube-api-access-2llsq\") pod \"barbican-db-create-f6xg8\" (UID: \"fc32e789-24cd-4056-ae5d-a52e12c03df1\") " pod="openstack/barbican-db-create-f6xg8" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.079864 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hd6gc\" (UniqueName: \"kubernetes.io/projected/3478d8a7-c396-4368-81eb-00d79c45c5b7-kube-api-access-hd6gc\") pod \"keystone-db-sync-xxmvj\" (UID: \"3478d8a7-c396-4368-81eb-00d79c45c5b7\") " pod="openstack/keystone-db-sync-xxmvj" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.079918 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jpdrt\" 
(UniqueName: \"kubernetes.io/projected/fe47550f-abfb-4941-bd29-fceb5d4074bb-kube-api-access-jpdrt\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.079931 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fe47550f-abfb-4941-bd29-fceb5d4074bb-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.079940 4751 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/fe47550f-abfb-4941-bd29-fceb5d4074bb-additional-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.080743 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39d227f2-e298-4b47-892b-a9a58e73b3d0-operator-scripts\") pod \"barbican-f47b-account-create-update-nwbj8\" (UID: \"39d227f2-e298-4b47-892b-a9a58e73b3d0\") " pod="openstack/barbican-f47b-account-create-update-nwbj8" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.081263 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc32e789-24cd-4056-ae5d-a52e12c03df1-operator-scripts\") pod \"barbican-db-create-f6xg8\" (UID: \"fc32e789-24cd-4056-ae5d-a52e12c03df1\") " pod="openstack/barbican-db-create-f6xg8" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.098158 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-gw6w6" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.099149 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fz7m4\" (UniqueName: \"kubernetes.io/projected/39d227f2-e298-4b47-892b-a9a58e73b3d0-kube-api-access-fz7m4\") pod \"barbican-f47b-account-create-update-nwbj8\" (UID: \"39d227f2-e298-4b47-892b-a9a58e73b3d0\") " pod="openstack/barbican-f47b-account-create-update-nwbj8" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.105736 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2llsq\" (UniqueName: \"kubernetes.io/projected/fc32e789-24cd-4056-ae5d-a52e12c03df1-kube-api-access-2llsq\") pod \"barbican-db-create-f6xg8\" (UID: \"fc32e789-24cd-4056-ae5d-a52e12c03df1\") " pod="openstack/barbican-db-create-f6xg8" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.127733 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-gdjfm" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.143556 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.181227 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-269kj\" (UniqueName: \"kubernetes.io/projected/6d0af769-7ac5-4a47-b229-5b456f60d406-kube-api-access-269kj\") pod \"neutron-db-create-ng9tv\" (UID: \"6d0af769-7ac5-4a47-b229-5b456f60d406\") " pod="openstack/neutron-db-create-ng9tv" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.181300 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e4eeeef-55a2-4656-ada6-c653949d6b7f-operator-scripts\") pod \"cinder-d758-account-create-update-bdsrf\" (UID: 
\"6e4eeeef-55a2-4656-ada6-c653949d6b7f\") " pod="openstack/cinder-d758-account-create-update-bdsrf" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.181319 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3478d8a7-c396-4368-81eb-00d79c45c5b7-config-data\") pod \"keystone-db-sync-xxmvj\" (UID: \"3478d8a7-c396-4368-81eb-00d79c45c5b7\") " pod="openstack/keystone-db-sync-xxmvj" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.181354 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgb58\" (UniqueName: \"kubernetes.io/projected/6e4eeeef-55a2-4656-ada6-c653949d6b7f-kube-api-access-bgb58\") pod \"cinder-d758-account-create-update-bdsrf\" (UID: \"6e4eeeef-55a2-4656-ada6-c653949d6b7f\") " pod="openstack/cinder-d758-account-create-update-bdsrf" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.181380 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3478d8a7-c396-4368-81eb-00d79c45c5b7-combined-ca-bundle\") pod \"keystone-db-sync-xxmvj\" (UID: \"3478d8a7-c396-4368-81eb-00d79c45c5b7\") " pod="openstack/keystone-db-sync-xxmvj" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.181442 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d0af769-7ac5-4a47-b229-5b456f60d406-operator-scripts\") pod \"neutron-db-create-ng9tv\" (UID: \"6d0af769-7ac5-4a47-b229-5b456f60d406\") " pod="openstack/neutron-db-create-ng9tv" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.181466 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hd6gc\" (UniqueName: \"kubernetes.io/projected/3478d8a7-c396-4368-81eb-00d79c45c5b7-kube-api-access-hd6gc\") pod \"keystone-db-sync-xxmvj\" (UID: \"3478d8a7-c396-4368-81eb-00d79c45c5b7\") " pod="openstack/keystone-db-sync-xxmvj" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.190618 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3478d8a7-c396-4368-81eb-00d79c45c5b7-combined-ca-bundle\") pod \"keystone-db-sync-xxmvj\" (UID: \"3478d8a7-c396-4368-81eb-00d79c45c5b7\") " pod="openstack/keystone-db-sync-xxmvj" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.192433 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3478d8a7-c396-4368-81eb-00d79c45c5b7-config-data\") pod \"keystone-db-sync-xxmvj\" (UID: \"3478d8a7-c396-4368-81eb-00d79c45c5b7\") " pod="openstack/keystone-db-sync-xxmvj" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.204191 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hd6gc\" (UniqueName: \"kubernetes.io/projected/3478d8a7-c396-4368-81eb-00d79c45c5b7-kube-api-access-hd6gc\") pod \"keystone-db-sync-xxmvj\" (UID: \"3478d8a7-c396-4368-81eb-00d79c45c5b7\") " pod="openstack/keystone-db-sync-xxmvj" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.209175 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-fb4c-account-create-update-crj56"] Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.210101 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-fb4c-account-create-update-crj56" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.221956 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-fb4c-account-create-update-crj56"] Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.223534 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.224015 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-f6xg8" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.238049 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-f47b-account-create-update-nwbj8" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.287227 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-269kj\" (UniqueName: \"kubernetes.io/projected/6d0af769-7ac5-4a47-b229-5b456f60d406-kube-api-access-269kj\") pod \"neutron-db-create-ng9tv\" (UID: \"6d0af769-7ac5-4a47-b229-5b456f60d406\") " pod="openstack/neutron-db-create-ng9tv" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.287314 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e4eeeef-55a2-4656-ada6-c653949d6b7f-operator-scripts\") pod \"cinder-d758-account-create-update-bdsrf\" (UID: \"6e4eeeef-55a2-4656-ada6-c653949d6b7f\") " pod="openstack/cinder-d758-account-create-update-bdsrf" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.287347 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgb58\" (UniqueName: \"kubernetes.io/projected/6e4eeeef-55a2-4656-ada6-c653949d6b7f-kube-api-access-bgb58\") pod \"cinder-d758-account-create-update-bdsrf\" (UID: \"6e4eeeef-55a2-4656-ada6-c653949d6b7f\") " pod="openstack/cinder-d758-account-create-update-bdsrf" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.287392 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf785eb4-7c95-411e-92ae-2be6b08f4d43-operator-scripts\") pod \"neutron-fb4c-account-create-update-crj56\" (UID: \"cf785eb4-7c95-411e-92ae-2be6b08f4d43\") " pod="openstack/neutron-fb4c-account-create-update-crj56" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.287425 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvcjl\" (UniqueName: \"kubernetes.io/projected/cf785eb4-7c95-411e-92ae-2be6b08f4d43-kube-api-access-vvcjl\") pod \"neutron-fb4c-account-create-update-crj56\" (UID: \"cf785eb4-7c95-411e-92ae-2be6b08f4d43\") " pod="openstack/neutron-fb4c-account-create-update-crj56" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.287445 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d0af769-7ac5-4a47-b229-5b456f60d406-operator-scripts\") pod \"neutron-db-create-ng9tv\" (UID: \"6d0af769-7ac5-4a47-b229-5b456f60d406\") " pod="openstack/neutron-db-create-ng9tv" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.288148 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d0af769-7ac5-4a47-b229-5b456f60d406-operator-scripts\") pod 
\"neutron-db-create-ng9tv\" (UID: \"6d0af769-7ac5-4a47-b229-5b456f60d406\") " pod="openstack/neutron-db-create-ng9tv" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.288645 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e4eeeef-55a2-4656-ada6-c653949d6b7f-operator-scripts\") pod \"cinder-d758-account-create-update-bdsrf\" (UID: \"6e4eeeef-55a2-4656-ada6-c653949d6b7f\") " pod="openstack/cinder-d758-account-create-update-bdsrf" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.304869 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-xxmvj" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.313953 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-269kj\" (UniqueName: \"kubernetes.io/projected/6d0af769-7ac5-4a47-b229-5b456f60d406-kube-api-access-269kj\") pod \"neutron-db-create-ng9tv\" (UID: \"6d0af769-7ac5-4a47-b229-5b456f60d406\") " pod="openstack/neutron-db-create-ng9tv" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.317914 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgb58\" (UniqueName: \"kubernetes.io/projected/6e4eeeef-55a2-4656-ada6-c653949d6b7f-kube-api-access-bgb58\") pod \"cinder-d758-account-create-update-bdsrf\" (UID: \"6e4eeeef-55a2-4656-ada6-c653949d6b7f\") " pod="openstack/cinder-d758-account-create-update-bdsrf" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.389277 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf785eb4-7c95-411e-92ae-2be6b08f4d43-operator-scripts\") pod \"neutron-fb4c-account-create-update-crj56\" (UID: \"cf785eb4-7c95-411e-92ae-2be6b08f4d43\") " pod="openstack/neutron-fb4c-account-create-update-crj56" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.389325 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvcjl\" (UniqueName: \"kubernetes.io/projected/cf785eb4-7c95-411e-92ae-2be6b08f4d43-kube-api-access-vvcjl\") pod \"neutron-fb4c-account-create-update-crj56\" (UID: \"cf785eb4-7c95-411e-92ae-2be6b08f4d43\") " pod="openstack/neutron-fb4c-account-create-update-crj56" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.390723 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf785eb4-7c95-411e-92ae-2be6b08f4d43-operator-scripts\") pod \"neutron-fb4c-account-create-update-crj56\" (UID: \"cf785eb4-7c95-411e-92ae-2be6b08f4d43\") " pod="openstack/neutron-fb4c-account-create-update-crj56" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.391861 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-ng9tv" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.413933 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-d758-account-create-update-bdsrf" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.420472 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvcjl\" (UniqueName: \"kubernetes.io/projected/cf785eb4-7c95-411e-92ae-2be6b08f4d43-kube-api-access-vvcjl\") pod \"neutron-fb4c-account-create-update-crj56\" (UID: \"cf785eb4-7c95-411e-92ae-2be6b08f4d43\") " pod="openstack/neutron-fb4c-account-create-update-crj56" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.437497 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-gdjfm-config-9xn59" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.438128 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-gdjfm-config-9xn59" event={"ID":"fe47550f-abfb-4941-bd29-fceb5d4074bb","Type":"ContainerDied","Data":"ee6374aa5ffbe0608a66ee0e176255eebbeeb291da9ae41dc767a9140e1d1d38"} Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.439768 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ee6374aa5ffbe0608a66ee0e176255eebbeeb291da9ae41dc767a9140e1d1d38" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.550312 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-fb4c-account-create-update-crj56" Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.611973 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerStarted","Data":"b93427209a64b6f4800316cc95e813dbfe839f3c1cc330375575973fc2bd09ba"} Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.612019 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerStarted","Data":"681fe80f14cb936ac1603940896731427ac9c788d645cc1ee250520ef0030927"} Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.612031 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerStarted","Data":"3e77e6f4a377245e2e374c8bf467f0ec059e1247d85be6db7d91b8496e308e18"} Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.784539 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-gw6w6"] Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.959978 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-gdjfm-config-9xn59"] Feb 27 16:46:32 crc kubenswrapper[4751]: I0227 16:46:32.968296 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-gdjfm-config-9xn59"] Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.061876 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-gdjfm-config-kkl89"] Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.071548 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-gdjfm-config-kkl89" Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.075638 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.086751 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-gdjfm-config-kkl89"] Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.114553 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ce7f17d5-740f-45f4-b7f9-2b947f431908-var-run\") pod \"ovn-controller-gdjfm-config-kkl89\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " pod="openstack/ovn-controller-gdjfm-config-kkl89" Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.114988 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce7f17d5-740f-45f4-b7f9-2b947f431908-scripts\") pod \"ovn-controller-gdjfm-config-kkl89\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " pod="openstack/ovn-controller-gdjfm-config-kkl89" Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.115011 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/ce7f17d5-740f-45f4-b7f9-2b947f431908-additional-scripts\") pod \"ovn-controller-gdjfm-config-kkl89\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " pod="openstack/ovn-controller-gdjfm-config-kkl89" Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.115088 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ce7f17d5-740f-45f4-b7f9-2b947f431908-var-log-ovn\") pod \"ovn-controller-gdjfm-config-kkl89\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " pod="openstack/ovn-controller-gdjfm-config-kkl89" Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.115121 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ce7f17d5-740f-45f4-b7f9-2b947f431908-var-run-ovn\") pod \"ovn-controller-gdjfm-config-kkl89\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " pod="openstack/ovn-controller-gdjfm-config-kkl89" Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.115140 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqljs\" (UniqueName: \"kubernetes.io/projected/ce7f17d5-740f-45f4-b7f9-2b947f431908-kube-api-access-mqljs\") pod \"ovn-controller-gdjfm-config-kkl89\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " pod="openstack/ovn-controller-gdjfm-config-kkl89" Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.116173 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-f6xg8"] Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.147838 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-f47b-account-create-update-nwbj8"] Feb 27 16:46:33 crc kubenswrapper[4751]: W0227 16:46:33.150932 4751 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod39d227f2_e298_4b47_892b_a9a58e73b3d0.slice/crio-80f91f4001a4c0a1e363c0a786264ac6e4bd14fa82188dbab9d7501799ca53d0 WatchSource:0}: Error finding container 80f91f4001a4c0a1e363c0a786264ac6e4bd14fa82188dbab9d7501799ca53d0: Status 404 returned error can't find the container with id 80f91f4001a4c0a1e363c0a786264ac6e4bd14fa82188dbab9d7501799ca53d0 Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.162065 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-ng9tv"] Feb 27 16:46:33 crc kubenswrapper[4751]: W0227 16:46:33.163961 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d0af769_7ac5_4a47_b229_5b456f60d406.slice/crio-d4beaabe8bd4625e076af6571f2e6cc4516f2caafd5b6293d549f80c894a57a4 WatchSource:0}: Error finding container d4beaabe8bd4625e076af6571f2e6cc4516f2caafd5b6293d549f80c894a57a4: Status 404 returned error can't find the container with id d4beaabe8bd4625e076af6571f2e6cc4516f2caafd5b6293d549f80c894a57a4 Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.217105 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ce7f17d5-740f-45f4-b7f9-2b947f431908-var-run\") pod \"ovn-controller-gdjfm-config-kkl89\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " pod="openstack/ovn-controller-gdjfm-config-kkl89" Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.217151 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce7f17d5-740f-45f4-b7f9-2b947f431908-scripts\") pod \"ovn-controller-gdjfm-config-kkl89\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " pod="openstack/ovn-controller-gdjfm-config-kkl89" Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.217173 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/ce7f17d5-740f-45f4-b7f9-2b947f431908-additional-scripts\") pod \"ovn-controller-gdjfm-config-kkl89\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " pod="openstack/ovn-controller-gdjfm-config-kkl89" Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.217242 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ce7f17d5-740f-45f4-b7f9-2b947f431908-var-log-ovn\") pod \"ovn-controller-gdjfm-config-kkl89\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " pod="openstack/ovn-controller-gdjfm-config-kkl89" Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.217273 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ce7f17d5-740f-45f4-b7f9-2b947f431908-var-run-ovn\") pod \"ovn-controller-gdjfm-config-kkl89\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " pod="openstack/ovn-controller-gdjfm-config-kkl89" Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.217286 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqljs\" (UniqueName: \"kubernetes.io/projected/ce7f17d5-740f-45f4-b7f9-2b947f431908-kube-api-access-mqljs\") pod \"ovn-controller-gdjfm-config-kkl89\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " pod="openstack/ovn-controller-gdjfm-config-kkl89" Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.217869 
4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ce7f17d5-740f-45f4-b7f9-2b947f431908-var-run\") pod \"ovn-controller-gdjfm-config-kkl89\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " pod="openstack/ovn-controller-gdjfm-config-kkl89" Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.218486 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ce7f17d5-740f-45f4-b7f9-2b947f431908-var-log-ovn\") pod \"ovn-controller-gdjfm-config-kkl89\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " pod="openstack/ovn-controller-gdjfm-config-kkl89" Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.218590 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ce7f17d5-740f-45f4-b7f9-2b947f431908-var-run-ovn\") pod \"ovn-controller-gdjfm-config-kkl89\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " pod="openstack/ovn-controller-gdjfm-config-kkl89" Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.219037 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/ce7f17d5-740f-45f4-b7f9-2b947f431908-additional-scripts\") pod \"ovn-controller-gdjfm-config-kkl89\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " pod="openstack/ovn-controller-gdjfm-config-kkl89" Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.220315 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce7f17d5-740f-45f4-b7f9-2b947f431908-scripts\") pod \"ovn-controller-gdjfm-config-kkl89\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " pod="openstack/ovn-controller-gdjfm-config-kkl89" Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.238741 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqljs\" (UniqueName: \"kubernetes.io/projected/ce7f17d5-740f-45f4-b7f9-2b947f431908-kube-api-access-mqljs\") pod \"ovn-controller-gdjfm-config-kkl89\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " pod="openstack/ovn-controller-gdjfm-config-kkl89" Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.340338 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-fb4c-account-create-update-crj56"] Feb 27 16:46:33 crc kubenswrapper[4751]: W0227 16:46:33.352042 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcf785eb4_7c95_411e_92ae_2be6b08f4d43.slice/crio-b6cf83872e6c68192fa7693a99e40015963addcb961b7ef21d28b31cdb93c68a WatchSource:0}: Error finding container b6cf83872e6c68192fa7693a99e40015963addcb961b7ef21d28b31cdb93c68a: Status 404 returned error can't find the container with id b6cf83872e6c68192fa7693a99e40015963addcb961b7ef21d28b31cdb93c68a Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.390426 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-xxmvj"] Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.401331 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-d758-account-create-update-bdsrf"] Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.404960 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-gdjfm-config-kkl89" Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.625522 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d758-account-create-update-bdsrf" event={"ID":"6e4eeeef-55a2-4656-ada6-c653949d6b7f","Type":"ContainerStarted","Data":"7d0e7f79a6be77c26cb5cf2359cd4629ea1dad285577072500ac0aeede5275c8"} Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.630511 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-f6xg8" event={"ID":"fc32e789-24cd-4056-ae5d-a52e12c03df1","Type":"ContainerStarted","Data":"c2205f05f4dd6feb20fe78fb9130808228569fa6677a436be7acc32d06637459"} Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.630576 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-f6xg8" event={"ID":"fc32e789-24cd-4056-ae5d-a52e12c03df1","Type":"ContainerStarted","Data":"6829f4f7237947009beaecedad81dca87a2b9b2c8702bdbc1d7b6f3fa34d9d70"} Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.635019 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerStarted","Data":"e79d296ac1de0b104dda270fbeebf21a4f77921165bf8426ffb98a3ab9fd68bd"} Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.638660 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-gw6w6" event={"ID":"c696c27b-af62-4855-8694-1e541307c4f5","Type":"ContainerStarted","Data":"5257b7d6460134c8a83b3fbef89afd3c3881dadf022442936951b53d49f7804d"} Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.638735 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-gw6w6" event={"ID":"c696c27b-af62-4855-8694-1e541307c4f5","Type":"ContainerStarted","Data":"4a361db3b79a78137465fa01cc2a66683252d7b93ce244dd72faec4227182994"} Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.651222 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-f47b-account-create-update-nwbj8" event={"ID":"39d227f2-e298-4b47-892b-a9a58e73b3d0","Type":"ContainerStarted","Data":"897975726e880be0fc1b35ec78227f61767a489580c895d0ac07a1b1d7e35666"} Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.651265 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-f47b-account-create-update-nwbj8" event={"ID":"39d227f2-e298-4b47-892b-a9a58e73b3d0","Type":"ContainerStarted","Data":"80f91f4001a4c0a1e363c0a786264ac6e4bd14fa82188dbab9d7501799ca53d0"} Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.654527 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-f6xg8" podStartSLOduration=2.654498072 podStartE2EDuration="2.654498072s" podCreationTimestamp="2026-02-27 16:46:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:46:33.64580168 +0000 UTC m=+1355.792816137" watchObservedRunningTime="2026-02-27 16:46:33.654498072 +0000 UTC m=+1355.801512519" Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.655888 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-ng9tv" event={"ID":"6d0af769-7ac5-4a47-b229-5b456f60d406","Type":"ContainerStarted","Data":"fd3db8fe83eca043a38528e17fcc22e812b9c08be75ab024119f3b8e186ffc32"} Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.655940 4751 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-ng9tv" event={"ID":"6d0af769-7ac5-4a47-b229-5b456f60d406","Type":"ContainerStarted","Data":"d4beaabe8bd4625e076af6571f2e6cc4516f2caafd5b6293d549f80c894a57a4"} Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.671380 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-fb4c-account-create-update-crj56" event={"ID":"cf785eb4-7c95-411e-92ae-2be6b08f4d43","Type":"ContainerStarted","Data":"b6cf83872e6c68192fa7693a99e40015963addcb961b7ef21d28b31cdb93c68a"} Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.679607 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-xxmvj" event={"ID":"3478d8a7-c396-4368-81eb-00d79c45c5b7","Type":"ContainerStarted","Data":"3b42606ec354e59cfad1c1353ddb34382871870ca6b97857c8810a00984a3b14"} Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.699123 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-ng9tv" podStartSLOduration=2.699105044 podStartE2EDuration="2.699105044s" podCreationTimestamp="2026-02-27 16:46:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:46:33.688232243 +0000 UTC m=+1355.835246690" watchObservedRunningTime="2026-02-27 16:46:33.699105044 +0000 UTC m=+1355.846119491" Feb 27 16:46:33 crc kubenswrapper[4751]: I0227 16:46:33.717713 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-f47b-account-create-update-nwbj8" podStartSLOduration=2.71768732 podStartE2EDuration="2.71768732s" podCreationTimestamp="2026-02-27 16:46:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:46:33.707740374 +0000 UTC m=+1355.854754821" watchObservedRunningTime="2026-02-27 16:46:33.71768732 +0000 UTC m=+1355.864701767" Feb 27 16:46:34 crc kubenswrapper[4751]: I0227 16:46:34.005180 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-gdjfm-config-kkl89"] Feb 27 16:46:34 crc kubenswrapper[4751]: W0227 16:46:34.013492 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce7f17d5_740f_45f4_b7f9_2b947f431908.slice/crio-c2f9bd5483274e1cbe796fedc739ff40db2dcc9523f97918d976ec58006103ee WatchSource:0}: Error finding container c2f9bd5483274e1cbe796fedc739ff40db2dcc9523f97918d976ec58006103ee: Status 404 returned error can't find the container with id c2f9bd5483274e1cbe796fedc739ff40db2dcc9523f97918d976ec58006103ee Feb 27 16:46:34 crc kubenswrapper[4751]: I0227 16:46:34.529996 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe47550f-abfb-4941-bd29-fceb5d4074bb" path="/var/lib/kubelet/pods/fe47550f-abfb-4941-bd29-fceb5d4074bb/volumes" Feb 27 16:46:34 crc kubenswrapper[4751]: I0227 16:46:34.695581 4751 generic.go:334] "Generic (PLEG): container finished" podID="fc32e789-24cd-4056-ae5d-a52e12c03df1" containerID="c2205f05f4dd6feb20fe78fb9130808228569fa6677a436be7acc32d06637459" exitCode=0 Feb 27 16:46:34 crc kubenswrapper[4751]: I0227 16:46:34.695720 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-f6xg8" event={"ID":"fc32e789-24cd-4056-ae5d-a52e12c03df1","Type":"ContainerDied","Data":"c2205f05f4dd6feb20fe78fb9130808228569fa6677a436be7acc32d06637459"} Feb 27 
16:46:34 crc kubenswrapper[4751]: I0227 16:46:34.698617 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-gw6w6" event={"ID":"c696c27b-af62-4855-8694-1e541307c4f5","Type":"ContainerDied","Data":"5257b7d6460134c8a83b3fbef89afd3c3881dadf022442936951b53d49f7804d"} Feb 27 16:46:34 crc kubenswrapper[4751]: I0227 16:46:34.698690 4751 generic.go:334] "Generic (PLEG): container finished" podID="c696c27b-af62-4855-8694-1e541307c4f5" containerID="5257b7d6460134c8a83b3fbef89afd3c3881dadf022442936951b53d49f7804d" exitCode=0 Feb 27 16:46:34 crc kubenswrapper[4751]: I0227 16:46:34.703538 4751 generic.go:334] "Generic (PLEG): container finished" podID="39d227f2-e298-4b47-892b-a9a58e73b3d0" containerID="897975726e880be0fc1b35ec78227f61767a489580c895d0ac07a1b1d7e35666" exitCode=0 Feb 27 16:46:34 crc kubenswrapper[4751]: I0227 16:46:34.703630 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-f47b-account-create-update-nwbj8" event={"ID":"39d227f2-e298-4b47-892b-a9a58e73b3d0","Type":"ContainerDied","Data":"897975726e880be0fc1b35ec78227f61767a489580c895d0ac07a1b1d7e35666"} Feb 27 16:46:34 crc kubenswrapper[4751]: I0227 16:46:34.712733 4751 generic.go:334] "Generic (PLEG): container finished" podID="ce7f17d5-740f-45f4-b7f9-2b947f431908" containerID="140b8fc7115efd7456e187dc7e0388dc53edef9eb6452f05d03914250c59cc20" exitCode=0 Feb 27 16:46:34 crc kubenswrapper[4751]: I0227 16:46:34.713075 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-gdjfm-config-kkl89" event={"ID":"ce7f17d5-740f-45f4-b7f9-2b947f431908","Type":"ContainerDied","Data":"140b8fc7115efd7456e187dc7e0388dc53edef9eb6452f05d03914250c59cc20"} Feb 27 16:46:34 crc kubenswrapper[4751]: I0227 16:46:34.713117 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-gdjfm-config-kkl89" event={"ID":"ce7f17d5-740f-45f4-b7f9-2b947f431908","Type":"ContainerStarted","Data":"c2f9bd5483274e1cbe796fedc739ff40db2dcc9523f97918d976ec58006103ee"} Feb 27 16:46:34 crc kubenswrapper[4751]: I0227 16:46:34.716911 4751 generic.go:334] "Generic (PLEG): container finished" podID="6d0af769-7ac5-4a47-b229-5b456f60d406" containerID="fd3db8fe83eca043a38528e17fcc22e812b9c08be75ab024119f3b8e186ffc32" exitCode=0 Feb 27 16:46:34 crc kubenswrapper[4751]: I0227 16:46:34.717012 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-ng9tv" event={"ID":"6d0af769-7ac5-4a47-b229-5b456f60d406","Type":"ContainerDied","Data":"fd3db8fe83eca043a38528e17fcc22e812b9c08be75ab024119f3b8e186ffc32"} Feb 27 16:46:34 crc kubenswrapper[4751]: I0227 16:46:34.718735 4751 generic.go:334] "Generic (PLEG): container finished" podID="cf785eb4-7c95-411e-92ae-2be6b08f4d43" containerID="e69c7cd9f46c7fcbc615d5c40b4025a63c73fd3c765fbd8afc93ab358c87eb2a" exitCode=0 Feb 27 16:46:34 crc kubenswrapper[4751]: I0227 16:46:34.718814 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-fb4c-account-create-update-crj56" event={"ID":"cf785eb4-7c95-411e-92ae-2be6b08f4d43","Type":"ContainerDied","Data":"e69c7cd9f46c7fcbc615d5c40b4025a63c73fd3c765fbd8afc93ab358c87eb2a"} Feb 27 16:46:34 crc kubenswrapper[4751]: I0227 16:46:34.720997 4751 generic.go:334] "Generic (PLEG): container finished" podID="6e4eeeef-55a2-4656-ada6-c653949d6b7f" containerID="65d85414151eefbb245f3b07b64f200b65b4e9916a138f81be1aac50636a36f4" exitCode=0 Feb 27 16:46:34 crc kubenswrapper[4751]: I0227 16:46:34.721052 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/cinder-d758-account-create-update-bdsrf" event={"ID":"6e4eeeef-55a2-4656-ada6-c653949d6b7f","Type":"ContainerDied","Data":"65d85414151eefbb245f3b07b64f200b65b4e9916a138f81be1aac50636a36f4"} Feb 27 16:46:35 crc kubenswrapper[4751]: I0227 16:46:35.154074 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-gw6w6" Feb 27 16:46:35 crc kubenswrapper[4751]: I0227 16:46:35.265794 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c696c27b-af62-4855-8694-1e541307c4f5-operator-scripts\") pod \"c696c27b-af62-4855-8694-1e541307c4f5\" (UID: \"c696c27b-af62-4855-8694-1e541307c4f5\") " Feb 27 16:46:35 crc kubenswrapper[4751]: I0227 16:46:35.265895 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l8s58\" (UniqueName: \"kubernetes.io/projected/c696c27b-af62-4855-8694-1e541307c4f5-kube-api-access-l8s58\") pod \"c696c27b-af62-4855-8694-1e541307c4f5\" (UID: \"c696c27b-af62-4855-8694-1e541307c4f5\") " Feb 27 16:46:35 crc kubenswrapper[4751]: I0227 16:46:35.267472 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c696c27b-af62-4855-8694-1e541307c4f5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c696c27b-af62-4855-8694-1e541307c4f5" (UID: "c696c27b-af62-4855-8694-1e541307c4f5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:35 crc kubenswrapper[4751]: I0227 16:46:35.270951 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c696c27b-af62-4855-8694-1e541307c4f5-kube-api-access-l8s58" (OuterVolumeSpecName: "kube-api-access-l8s58") pod "c696c27b-af62-4855-8694-1e541307c4f5" (UID: "c696c27b-af62-4855-8694-1e541307c4f5"). InnerVolumeSpecName "kube-api-access-l8s58". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:46:35 crc kubenswrapper[4751]: E0227 16:46:35.337584 4751 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.64:55228->38.102.83.64:46317: write tcp 38.102.83.64:55228->38.102.83.64:46317: write: broken pipe Feb 27 16:46:35 crc kubenswrapper[4751]: I0227 16:46:35.368366 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c696c27b-af62-4855-8694-1e541307c4f5-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:35 crc kubenswrapper[4751]: I0227 16:46:35.368457 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l8s58\" (UniqueName: \"kubernetes.io/projected/c696c27b-af62-4855-8694-1e541307c4f5-kube-api-access-l8s58\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:35 crc kubenswrapper[4751]: I0227 16:46:35.737103 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerStarted","Data":"d9d8a8e1e05ef6e1c7673a877cb8fbfae2175606b7184f573f2d5f77b2fb1d24"} Feb 27 16:46:35 crc kubenswrapper[4751]: I0227 16:46:35.737589 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerStarted","Data":"4ca99524eea4a99a550fc63f10a9bcbfd3c3e4a41fa126a72fb0140d5e9d14f0"} Feb 27 16:46:35 crc kubenswrapper[4751]: I0227 16:46:35.740095 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-gw6w6" Feb 27 16:46:35 crc kubenswrapper[4751]: I0227 16:46:35.740113 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-gw6w6" event={"ID":"c696c27b-af62-4855-8694-1e541307c4f5","Type":"ContainerDied","Data":"4a361db3b79a78137465fa01cc2a66683252d7b93ce244dd72faec4227182994"} Feb 27 16:46:35 crc kubenswrapper[4751]: I0227 16:46:35.740147 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4a361db3b79a78137465fa01cc2a66683252d7b93ce244dd72faec4227182994" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.110713 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-d758-account-create-update-bdsrf" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.188985 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bgb58\" (UniqueName: \"kubernetes.io/projected/6e4eeeef-55a2-4656-ada6-c653949d6b7f-kube-api-access-bgb58\") pod \"6e4eeeef-55a2-4656-ada6-c653949d6b7f\" (UID: \"6e4eeeef-55a2-4656-ada6-c653949d6b7f\") " Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.189838 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e4eeeef-55a2-4656-ada6-c653949d6b7f-operator-scripts\") pod \"6e4eeeef-55a2-4656-ada6-c653949d6b7f\" (UID: \"6e4eeeef-55a2-4656-ada6-c653949d6b7f\") " Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.190494 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e4eeeef-55a2-4656-ada6-c653949d6b7f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6e4eeeef-55a2-4656-ada6-c653949d6b7f" (UID: "6e4eeeef-55a2-4656-ada6-c653949d6b7f"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.194294 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e4eeeef-55a2-4656-ada6-c653949d6b7f-kube-api-access-bgb58" (OuterVolumeSpecName: "kube-api-access-bgb58") pod "6e4eeeef-55a2-4656-ada6-c653949d6b7f" (UID: "6e4eeeef-55a2-4656-ada6-c653949d6b7f"). InnerVolumeSpecName "kube-api-access-bgb58". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.292899 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e4eeeef-55a2-4656-ada6-c653949d6b7f-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.292962 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bgb58\" (UniqueName: \"kubernetes.io/projected/6e4eeeef-55a2-4656-ada6-c653949d6b7f-kube-api-access-bgb58\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.338080 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-fb4c-account-create-update-crj56" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.364843 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-f6xg8" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.376735 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-f47b-account-create-update-nwbj8" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.383841 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-ng9tv" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.389716 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-gdjfm-config-kkl89" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.394035 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vvcjl\" (UniqueName: \"kubernetes.io/projected/cf785eb4-7c95-411e-92ae-2be6b08f4d43-kube-api-access-vvcjl\") pod \"cf785eb4-7c95-411e-92ae-2be6b08f4d43\" (UID: \"cf785eb4-7c95-411e-92ae-2be6b08f4d43\") " Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.394085 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fz7m4\" (UniqueName: \"kubernetes.io/projected/39d227f2-e298-4b47-892b-a9a58e73b3d0-kube-api-access-fz7m4\") pod \"39d227f2-e298-4b47-892b-a9a58e73b3d0\" (UID: \"39d227f2-e298-4b47-892b-a9a58e73b3d0\") " Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.394259 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf785eb4-7c95-411e-92ae-2be6b08f4d43-operator-scripts\") pod \"cf785eb4-7c95-411e-92ae-2be6b08f4d43\" (UID: \"cf785eb4-7c95-411e-92ae-2be6b08f4d43\") " Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.394312 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2llsq\" (UniqueName: \"kubernetes.io/projected/fc32e789-24cd-4056-ae5d-a52e12c03df1-kube-api-access-2llsq\") pod \"fc32e789-24cd-4056-ae5d-a52e12c03df1\" (UID: \"fc32e789-24cd-4056-ae5d-a52e12c03df1\") " Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.394342 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc32e789-24cd-4056-ae5d-a52e12c03df1-operator-scripts\") pod \"fc32e789-24cd-4056-ae5d-a52e12c03df1\" (UID: \"fc32e789-24cd-4056-ae5d-a52e12c03df1\") " Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.394382 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39d227f2-e298-4b47-892b-a9a58e73b3d0-operator-scripts\") pod \"39d227f2-e298-4b47-892b-a9a58e73b3d0\" (UID: \"39d227f2-e298-4b47-892b-a9a58e73b3d0\") " Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.395194 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39d227f2-e298-4b47-892b-a9a58e73b3d0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "39d227f2-e298-4b47-892b-a9a58e73b3d0" (UID: "39d227f2-e298-4b47-892b-a9a58e73b3d0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.401223 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc32e789-24cd-4056-ae5d-a52e12c03df1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fc32e789-24cd-4056-ae5d-a52e12c03df1" (UID: "fc32e789-24cd-4056-ae5d-a52e12c03df1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.402770 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf785eb4-7c95-411e-92ae-2be6b08f4d43-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cf785eb4-7c95-411e-92ae-2be6b08f4d43" (UID: "cf785eb4-7c95-411e-92ae-2be6b08f4d43"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.407197 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc32e789-24cd-4056-ae5d-a52e12c03df1-kube-api-access-2llsq" (OuterVolumeSpecName: "kube-api-access-2llsq") pod "fc32e789-24cd-4056-ae5d-a52e12c03df1" (UID: "fc32e789-24cd-4056-ae5d-a52e12c03df1"). InnerVolumeSpecName "kube-api-access-2llsq". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.407698 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39d227f2-e298-4b47-892b-a9a58e73b3d0-kube-api-access-fz7m4" (OuterVolumeSpecName: "kube-api-access-fz7m4") pod "39d227f2-e298-4b47-892b-a9a58e73b3d0" (UID: "39d227f2-e298-4b47-892b-a9a58e73b3d0"). InnerVolumeSpecName "kube-api-access-fz7m4". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.410891 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf785eb4-7c95-411e-92ae-2be6b08f4d43-kube-api-access-vvcjl" (OuterVolumeSpecName: "kube-api-access-vvcjl") pod "cf785eb4-7c95-411e-92ae-2be6b08f4d43" (UID: "cf785eb4-7c95-411e-92ae-2be6b08f4d43"). InnerVolumeSpecName "kube-api-access-vvcjl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.495988 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce7f17d5-740f-45f4-b7f9-2b947f431908-scripts\") pod \"ce7f17d5-740f-45f4-b7f9-2b947f431908\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.496048 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ce7f17d5-740f-45f4-b7f9-2b947f431908-var-run-ovn\") pod \"ce7f17d5-740f-45f4-b7f9-2b947f431908\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.496111 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/ce7f17d5-740f-45f4-b7f9-2b947f431908-additional-scripts\") pod \"ce7f17d5-740f-45f4-b7f9-2b947f431908\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.496211 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ce7f17d5-740f-45f4-b7f9-2b947f431908-var-log-ovn\") pod \"ce7f17d5-740f-45f4-b7f9-2b947f431908\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.496262 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-269kj\" (UniqueName: \"kubernetes.io/projected/6d0af769-7ac5-4a47-b229-5b456f60d406-kube-api-access-269kj\") pod \"6d0af769-7ac5-4a47-b229-5b456f60d406\" (UID: \"6d0af769-7ac5-4a47-b229-5b456f60d406\") " Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.496286 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d0af769-7ac5-4a47-b229-5b456f60d406-operator-scripts\") pod \"6d0af769-7ac5-4a47-b229-5b456f60d406\" (UID: 
\"6d0af769-7ac5-4a47-b229-5b456f60d406\") " Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.496310 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ce7f17d5-740f-45f4-b7f9-2b947f431908-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "ce7f17d5-740f-45f4-b7f9-2b947f431908" (UID: "ce7f17d5-740f-45f4-b7f9-2b947f431908"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.496368 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ce7f17d5-740f-45f4-b7f9-2b947f431908-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "ce7f17d5-740f-45f4-b7f9-2b947f431908" (UID: "ce7f17d5-740f-45f4-b7f9-2b947f431908"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.496373 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ce7f17d5-740f-45f4-b7f9-2b947f431908-var-run\") pod \"ce7f17d5-740f-45f4-b7f9-2b947f431908\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.496467 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqljs\" (UniqueName: \"kubernetes.io/projected/ce7f17d5-740f-45f4-b7f9-2b947f431908-kube-api-access-mqljs\") pod \"ce7f17d5-740f-45f4-b7f9-2b947f431908\" (UID: \"ce7f17d5-740f-45f4-b7f9-2b947f431908\") " Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.496611 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ce7f17d5-740f-45f4-b7f9-2b947f431908-var-run" (OuterVolumeSpecName: "var-run") pod "ce7f17d5-740f-45f4-b7f9-2b947f431908" (UID: "ce7f17d5-740f-45f4-b7f9-2b947f431908"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.496911 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf785eb4-7c95-411e-92ae-2be6b08f4d43-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.496932 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2llsq\" (UniqueName: \"kubernetes.io/projected/fc32e789-24cd-4056-ae5d-a52e12c03df1-kube-api-access-2llsq\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.496946 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc32e789-24cd-4056-ae5d-a52e12c03df1-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.496955 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39d227f2-e298-4b47-892b-a9a58e73b3d0-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.496966 4751 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ce7f17d5-740f-45f4-b7f9-2b947f431908-var-run-ovn\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.496976 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vvcjl\" (UniqueName: \"kubernetes.io/projected/cf785eb4-7c95-411e-92ae-2be6b08f4d43-kube-api-access-vvcjl\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.496985 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fz7m4\" (UniqueName: \"kubernetes.io/projected/39d227f2-e298-4b47-892b-a9a58e73b3d0-kube-api-access-fz7m4\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.496997 4751 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ce7f17d5-740f-45f4-b7f9-2b947f431908-var-log-ovn\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.497005 4751 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ce7f17d5-740f-45f4-b7f9-2b947f431908-var-run\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.497060 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce7f17d5-740f-45f4-b7f9-2b947f431908-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "ce7f17d5-740f-45f4-b7f9-2b947f431908" (UID: "ce7f17d5-740f-45f4-b7f9-2b947f431908"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.497555 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d0af769-7ac5-4a47-b229-5b456f60d406-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6d0af769-7ac5-4a47-b229-5b456f60d406" (UID: "6d0af769-7ac5-4a47-b229-5b456f60d406"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.497823 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce7f17d5-740f-45f4-b7f9-2b947f431908-scripts" (OuterVolumeSpecName: "scripts") pod "ce7f17d5-740f-45f4-b7f9-2b947f431908" (UID: "ce7f17d5-740f-45f4-b7f9-2b947f431908"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.500232 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce7f17d5-740f-45f4-b7f9-2b947f431908-kube-api-access-mqljs" (OuterVolumeSpecName: "kube-api-access-mqljs") pod "ce7f17d5-740f-45f4-b7f9-2b947f431908" (UID: "ce7f17d5-740f-45f4-b7f9-2b947f431908"). InnerVolumeSpecName "kube-api-access-mqljs". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.500968 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d0af769-7ac5-4a47-b229-5b456f60d406-kube-api-access-269kj" (OuterVolumeSpecName: "kube-api-access-269kj") pod "6d0af769-7ac5-4a47-b229-5b456f60d406" (UID: "6d0af769-7ac5-4a47-b229-5b456f60d406"). InnerVolumeSpecName "kube-api-access-269kj". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.601334 4751 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/ce7f17d5-740f-45f4-b7f9-2b947f431908-additional-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.601384 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-269kj\" (UniqueName: \"kubernetes.io/projected/6d0af769-7ac5-4a47-b229-5b456f60d406-kube-api-access-269kj\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.601424 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d0af769-7ac5-4a47-b229-5b456f60d406-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.601434 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqljs\" (UniqueName: \"kubernetes.io/projected/ce7f17d5-740f-45f4-b7f9-2b947f431908-kube-api-access-mqljs\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.601443 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce7f17d5-740f-45f4-b7f9-2b947f431908-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.760645 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerStarted","Data":"de66ee2999fac29e22b4821b042f3b6c8bcd8af2215e1895604af96a423ffd6d"} Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.761060 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerStarted","Data":"8d3c817e96059d70b12ef7286c0552c07aa2499059b8053476c3d15e8305625f"} Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.761077 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerStarted","Data":"83ce35274bb0792c55293b6011d40b169da183543e50dad4bd7e201d6f6a7146"} Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.761088 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerStarted","Data":"23f3e1c514c8b828bd9ae5bb87e214f223a4588483efcabd156863e7581c145e"} Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.764415 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-f47b-account-create-update-nwbj8" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.764413 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-f47b-account-create-update-nwbj8" event={"ID":"39d227f2-e298-4b47-892b-a9a58e73b3d0","Type":"ContainerDied","Data":"80f91f4001a4c0a1e363c0a786264ac6e4bd14fa82188dbab9d7501799ca53d0"} Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.764536 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="80f91f4001a4c0a1e363c0a786264ac6e4bd14fa82188dbab9d7501799ca53d0" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.767022 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-gdjfm-config-kkl89" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.767029 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-gdjfm-config-kkl89" event={"ID":"ce7f17d5-740f-45f4-b7f9-2b947f431908","Type":"ContainerDied","Data":"c2f9bd5483274e1cbe796fedc739ff40db2dcc9523f97918d976ec58006103ee"} Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.767502 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c2f9bd5483274e1cbe796fedc739ff40db2dcc9523f97918d976ec58006103ee" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.770032 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-ng9tv" event={"ID":"6d0af769-7ac5-4a47-b229-5b456f60d406","Type":"ContainerDied","Data":"d4beaabe8bd4625e076af6571f2e6cc4516f2caafd5b6293d549f80c894a57a4"} Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.770065 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d4beaabe8bd4625e076af6571f2e6cc4516f2caafd5b6293d549f80c894a57a4" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.770036 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-ng9tv" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.772751 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-fb4c-account-create-update-crj56" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.773162 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-fb4c-account-create-update-crj56" event={"ID":"cf785eb4-7c95-411e-92ae-2be6b08f4d43","Type":"ContainerDied","Data":"b6cf83872e6c68192fa7693a99e40015963addcb961b7ef21d28b31cdb93c68a"} Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.773190 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b6cf83872e6c68192fa7693a99e40015963addcb961b7ef21d28b31cdb93c68a" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.775290 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d758-account-create-update-bdsrf" event={"ID":"6e4eeeef-55a2-4656-ada6-c653949d6b7f","Type":"ContainerDied","Data":"7d0e7f79a6be77c26cb5cf2359cd4629ea1dad285577072500ac0aeede5275c8"} Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.775312 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7d0e7f79a6be77c26cb5cf2359cd4629ea1dad285577072500ac0aeede5275c8" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.775346 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-d758-account-create-update-bdsrf" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.781441 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-f6xg8" event={"ID":"fc32e789-24cd-4056-ae5d-a52e12c03df1","Type":"ContainerDied","Data":"6829f4f7237947009beaecedad81dca87a2b9b2c8702bdbc1d7b6f3fa34d9d70"} Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.781677 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6829f4f7237947009beaecedad81dca87a2b9b2c8702bdbc1d7b6f3fa34d9d70" Feb 27 16:46:36 crc kubenswrapper[4751]: I0227 16:46:36.781458 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-f6xg8" Feb 27 16:46:37 crc kubenswrapper[4751]: I0227 16:46:37.479653 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-gdjfm-config-kkl89"] Feb 27 16:46:37 crc kubenswrapper[4751]: I0227 16:46:37.494189 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-gdjfm-config-kkl89"] Feb 27 16:46:37 crc kubenswrapper[4751]: I0227 16:46:37.795575 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerStarted","Data":"5d9b075940c17ccffe3c35e09be4bf03a3f95fc97562c089e3ed06153ce12e22"} Feb 27 16:46:37 crc kubenswrapper[4751]: I0227 16:46:37.848711 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=21.176311573 podStartE2EDuration="40.84869268s" podCreationTimestamp="2026-02-27 16:45:57 +0000 UTC" firstStartedPulling="2026-02-27 16:46:15.466155208 +0000 UTC m=+1337.613169655" lastFinishedPulling="2026-02-27 16:46:35.138536315 +0000 UTC m=+1357.285550762" observedRunningTime="2026-02-27 16:46:37.844333594 +0000 UTC m=+1359.991348121" watchObservedRunningTime="2026-02-27 16:46:37.84869268 +0000 UTC m=+1359.995707127" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.161687 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-4jwzm"] Feb 27 16:46:38 crc kubenswrapper[4751]: E0227 16:46:38.162184 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c696c27b-af62-4855-8694-1e541307c4f5" containerName="mariadb-database-create" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.162206 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="c696c27b-af62-4855-8694-1e541307c4f5" containerName="mariadb-database-create" Feb 27 16:46:38 crc kubenswrapper[4751]: E0227 16:46:38.162235 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39d227f2-e298-4b47-892b-a9a58e73b3d0" containerName="mariadb-account-create-update" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.162248 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="39d227f2-e298-4b47-892b-a9a58e73b3d0" containerName="mariadb-account-create-update" Feb 27 16:46:38 crc kubenswrapper[4751]: E0227 16:46:38.162277 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e4eeeef-55a2-4656-ada6-c653949d6b7f" containerName="mariadb-account-create-update" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.162288 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e4eeeef-55a2-4656-ada6-c653949d6b7f" containerName="mariadb-account-create-update" Feb 27 16:46:38 crc kubenswrapper[4751]: E0227 16:46:38.162308 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf785eb4-7c95-411e-92ae-2be6b08f4d43" containerName="mariadb-account-create-update" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.162320 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf785eb4-7c95-411e-92ae-2be6b08f4d43" containerName="mariadb-account-create-update" Feb 27 16:46:38 crc kubenswrapper[4751]: E0227 16:46:38.162336 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc32e789-24cd-4056-ae5d-a52e12c03df1" containerName="mariadb-database-create" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.162347 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc32e789-24cd-4056-ae5d-a52e12c03df1" 
containerName="mariadb-database-create" Feb 27 16:46:38 crc kubenswrapper[4751]: E0227 16:46:38.162369 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce7f17d5-740f-45f4-b7f9-2b947f431908" containerName="ovn-config" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.162425 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce7f17d5-740f-45f4-b7f9-2b947f431908" containerName="ovn-config" Feb 27 16:46:38 crc kubenswrapper[4751]: E0227 16:46:38.162447 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d0af769-7ac5-4a47-b229-5b456f60d406" containerName="mariadb-database-create" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.162458 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d0af769-7ac5-4a47-b229-5b456f60d406" containerName="mariadb-database-create" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.162707 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf785eb4-7c95-411e-92ae-2be6b08f4d43" containerName="mariadb-account-create-update" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.162735 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="39d227f2-e298-4b47-892b-a9a58e73b3d0" containerName="mariadb-account-create-update" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.162753 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc32e789-24cd-4056-ae5d-a52e12c03df1" containerName="mariadb-database-create" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.162771 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d0af769-7ac5-4a47-b229-5b456f60d406" containerName="mariadb-database-create" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.162800 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="c696c27b-af62-4855-8694-1e541307c4f5" containerName="mariadb-database-create" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.162812 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e4eeeef-55a2-4656-ada6-c653949d6b7f" containerName="mariadb-account-create-update" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.162822 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce7f17d5-740f-45f4-b7f9-2b947f431908" containerName="ovn-config" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.164179 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.170648 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.178996 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-4jwzm"] Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.230510 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pljvg\" (UniqueName: \"kubernetes.io/projected/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-kube-api-access-pljvg\") pod \"dnsmasq-dns-764c5664d7-4jwzm\" (UID: \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.230557 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-dns-swift-storage-0\") pod \"dnsmasq-dns-764c5664d7-4jwzm\" (UID: \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.230591 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-dns-svc\") pod \"dnsmasq-dns-764c5664d7-4jwzm\" (UID: \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.230643 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-ovsdbserver-sb\") pod \"dnsmasq-dns-764c5664d7-4jwzm\" (UID: \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.230670 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-ovsdbserver-nb\") pod \"dnsmasq-dns-764c5664d7-4jwzm\" (UID: \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.231011 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-config\") pod \"dnsmasq-dns-764c5664d7-4jwzm\" (UID: \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.331765 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-ovsdbserver-sb\") pod \"dnsmasq-dns-764c5664d7-4jwzm\" (UID: \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.331832 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-ovsdbserver-nb\") pod \"dnsmasq-dns-764c5664d7-4jwzm\" (UID: 
\"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.331918 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-config\") pod \"dnsmasq-dns-764c5664d7-4jwzm\" (UID: \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.331965 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pljvg\" (UniqueName: \"kubernetes.io/projected/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-kube-api-access-pljvg\") pod \"dnsmasq-dns-764c5664d7-4jwzm\" (UID: \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.331990 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-dns-swift-storage-0\") pod \"dnsmasq-dns-764c5664d7-4jwzm\" (UID: \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.332025 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-dns-svc\") pod \"dnsmasq-dns-764c5664d7-4jwzm\" (UID: \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.333219 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-ovsdbserver-sb\") pod \"dnsmasq-dns-764c5664d7-4jwzm\" (UID: \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.333368 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-dns-swift-storage-0\") pod \"dnsmasq-dns-764c5664d7-4jwzm\" (UID: \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.333443 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-config\") pod \"dnsmasq-dns-764c5664d7-4jwzm\" (UID: \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.333997 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-dns-svc\") pod \"dnsmasq-dns-764c5664d7-4jwzm\" (UID: \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.334691 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-ovsdbserver-nb\") pod \"dnsmasq-dns-764c5664d7-4jwzm\" (UID: \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:38 crc 
kubenswrapper[4751]: I0227 16:46:38.378380 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pljvg\" (UniqueName: \"kubernetes.io/projected/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-kube-api-access-pljvg\") pod \"dnsmasq-dns-764c5664d7-4jwzm\" (UID: \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.490912 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:38 crc kubenswrapper[4751]: I0227 16:46:38.532142 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce7f17d5-740f-45f4-b7f9-2b947f431908" path="/var/lib/kubelet/pods/ce7f17d5-740f-45f4-b7f9-2b947f431908/volumes" Feb 27 16:46:40 crc kubenswrapper[4751]: I0227 16:46:40.548698 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-4jwzm"] Feb 27 16:46:40 crc kubenswrapper[4751]: W0227 16:46:40.549771 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc7e69797_e4c2_493e_869f_b1e6ef0b95b0.slice/crio-f0e799d23dcafe7c62f996ceae32b84497f6c0c30c0de0244abcbf913d439b6b WatchSource:0}: Error finding container f0e799d23dcafe7c62f996ceae32b84497f6c0c30c0de0244abcbf913d439b6b: Status 404 returned error can't find the container with id f0e799d23dcafe7c62f996ceae32b84497f6c0c30c0de0244abcbf913d439b6b Feb 27 16:46:40 crc kubenswrapper[4751]: I0227 16:46:40.822965 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-xxmvj" event={"ID":"3478d8a7-c396-4368-81eb-00d79c45c5b7","Type":"ContainerStarted","Data":"25ba2a6628ec1ee4d5b76b5402988f5b6e113ebdefb932264b55ca80456079a1"} Feb 27 16:46:40 crc kubenswrapper[4751]: I0227 16:46:40.825848 4751 generic.go:334] "Generic (PLEG): container finished" podID="c7e69797-e4c2-493e-869f-b1e6ef0b95b0" containerID="c7d5f7d113931c743c6730cd11dcf967da885f9386e349326facd1adb3ce5d4c" exitCode=0 Feb 27 16:46:40 crc kubenswrapper[4751]: I0227 16:46:40.825897 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" event={"ID":"c7e69797-e4c2-493e-869f-b1e6ef0b95b0","Type":"ContainerDied","Data":"c7d5f7d113931c743c6730cd11dcf967da885f9386e349326facd1adb3ce5d4c"} Feb 27 16:46:40 crc kubenswrapper[4751]: I0227 16:46:40.825955 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" event={"ID":"c7e69797-e4c2-493e-869f-b1e6ef0b95b0","Type":"ContainerStarted","Data":"f0e799d23dcafe7c62f996ceae32b84497f6c0c30c0de0244abcbf913d439b6b"} Feb 27 16:46:40 crc kubenswrapper[4751]: I0227 16:46:40.852833 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-xxmvj" podStartSLOduration=3.109334392 podStartE2EDuration="9.85281468s" podCreationTimestamp="2026-02-27 16:46:31 +0000 UTC" firstStartedPulling="2026-02-27 16:46:33.404793562 +0000 UTC m=+1355.551808009" lastFinishedPulling="2026-02-27 16:46:40.14827385 +0000 UTC m=+1362.295288297" observedRunningTime="2026-02-27 16:46:40.846158332 +0000 UTC m=+1362.993172819" watchObservedRunningTime="2026-02-27 16:46:40.85281468 +0000 UTC m=+1362.999829127" Feb 27 16:46:41 crc kubenswrapper[4751]: I0227 16:46:41.841879 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" 
event={"ID":"c7e69797-e4c2-493e-869f-b1e6ef0b95b0","Type":"ContainerStarted","Data":"32fc9678983aa4377d171904a586e46c82b73dd67a76951324580708a6ac3ac8"} Feb 27 16:46:41 crc kubenswrapper[4751]: I0227 16:46:41.872024 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" podStartSLOduration=3.872005585 podStartE2EDuration="3.872005585s" podCreationTimestamp="2026-02-27 16:46:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:46:41.867273869 +0000 UTC m=+1364.014288316" watchObservedRunningTime="2026-02-27 16:46:41.872005585 +0000 UTC m=+1364.019020042" Feb 27 16:46:42 crc kubenswrapper[4751]: I0227 16:46:42.856908 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:43 crc kubenswrapper[4751]: I0227 16:46:43.866110 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-2pj4h" event={"ID":"51f9db5a-ab58-4795-b09f-c2df5406c0cf","Type":"ContainerStarted","Data":"62783f7a506513bf1395f3c504deef531a9030d5bf80991511d067dd84478217"} Feb 27 16:46:43 crc kubenswrapper[4751]: I0227 16:46:43.895254 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-2pj4h" podStartSLOduration=2.939391321 podStartE2EDuration="33.895230292s" podCreationTimestamp="2026-02-27 16:46:10 +0000 UTC" firstStartedPulling="2026-02-27 16:46:11.702585382 +0000 UTC m=+1333.849599829" lastFinishedPulling="2026-02-27 16:46:42.658424363 +0000 UTC m=+1364.805438800" observedRunningTime="2026-02-27 16:46:43.886880629 +0000 UTC m=+1366.033895106" watchObservedRunningTime="2026-02-27 16:46:43.895230292 +0000 UTC m=+1366.042244769" Feb 27 16:46:44 crc kubenswrapper[4751]: I0227 16:46:44.038090 4751 scope.go:117] "RemoveContainer" containerID="21531c6323ea30503124a33a716bdbd9ac3a3f6b39a03e44004cf805cd19e25c" Feb 27 16:46:45 crc kubenswrapper[4751]: I0227 16:46:45.888514 4751 generic.go:334] "Generic (PLEG): container finished" podID="3478d8a7-c396-4368-81eb-00d79c45c5b7" containerID="25ba2a6628ec1ee4d5b76b5402988f5b6e113ebdefb932264b55ca80456079a1" exitCode=0 Feb 27 16:46:45 crc kubenswrapper[4751]: I0227 16:46:45.888583 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-xxmvj" event={"ID":"3478d8a7-c396-4368-81eb-00d79c45c5b7","Type":"ContainerDied","Data":"25ba2a6628ec1ee4d5b76b5402988f5b6e113ebdefb932264b55ca80456079a1"} Feb 27 16:46:47 crc kubenswrapper[4751]: I0227 16:46:47.233784 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-xxmvj" Feb 27 16:46:47 crc kubenswrapper[4751]: I0227 16:46:47.407466 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3478d8a7-c396-4368-81eb-00d79c45c5b7-config-data\") pod \"3478d8a7-c396-4368-81eb-00d79c45c5b7\" (UID: \"3478d8a7-c396-4368-81eb-00d79c45c5b7\") " Feb 27 16:46:47 crc kubenswrapper[4751]: I0227 16:46:47.407571 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3478d8a7-c396-4368-81eb-00d79c45c5b7-combined-ca-bundle\") pod \"3478d8a7-c396-4368-81eb-00d79c45c5b7\" (UID: \"3478d8a7-c396-4368-81eb-00d79c45c5b7\") " Feb 27 16:46:47 crc kubenswrapper[4751]: I0227 16:46:47.407604 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hd6gc\" (UniqueName: \"kubernetes.io/projected/3478d8a7-c396-4368-81eb-00d79c45c5b7-kube-api-access-hd6gc\") pod \"3478d8a7-c396-4368-81eb-00d79c45c5b7\" (UID: \"3478d8a7-c396-4368-81eb-00d79c45c5b7\") " Feb 27 16:46:47 crc kubenswrapper[4751]: I0227 16:46:47.414156 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3478d8a7-c396-4368-81eb-00d79c45c5b7-kube-api-access-hd6gc" (OuterVolumeSpecName: "kube-api-access-hd6gc") pod "3478d8a7-c396-4368-81eb-00d79c45c5b7" (UID: "3478d8a7-c396-4368-81eb-00d79c45c5b7"). InnerVolumeSpecName "kube-api-access-hd6gc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:46:47 crc kubenswrapper[4751]: I0227 16:46:47.455780 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3478d8a7-c396-4368-81eb-00d79c45c5b7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3478d8a7-c396-4368-81eb-00d79c45c5b7" (UID: "3478d8a7-c396-4368-81eb-00d79c45c5b7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:46:47 crc kubenswrapper[4751]: I0227 16:46:47.456238 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3478d8a7-c396-4368-81eb-00d79c45c5b7-config-data" (OuterVolumeSpecName: "config-data") pod "3478d8a7-c396-4368-81eb-00d79c45c5b7" (UID: "3478d8a7-c396-4368-81eb-00d79c45c5b7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:46:47 crc kubenswrapper[4751]: I0227 16:46:47.509960 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3478d8a7-c396-4368-81eb-00d79c45c5b7-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:47 crc kubenswrapper[4751]: I0227 16:46:47.510020 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3478d8a7-c396-4368-81eb-00d79c45c5b7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:47 crc kubenswrapper[4751]: I0227 16:46:47.510043 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hd6gc\" (UniqueName: \"kubernetes.io/projected/3478d8a7-c396-4368-81eb-00d79c45c5b7-kube-api-access-hd6gc\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:47 crc kubenswrapper[4751]: I0227 16:46:47.908205 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-xxmvj" event={"ID":"3478d8a7-c396-4368-81eb-00d79c45c5b7","Type":"ContainerDied","Data":"3b42606ec354e59cfad1c1353ddb34382871870ca6b97857c8810a00984a3b14"} Feb 27 16:46:47 crc kubenswrapper[4751]: I0227 16:46:47.908245 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b42606ec354e59cfad1c1353ddb34382871870ca6b97857c8810a00984a3b14" Feb 27 16:46:47 crc kubenswrapper[4751]: I0227 16:46:47.908263 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-xxmvj" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.150432 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-4jwzm"] Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.151026 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" podUID="c7e69797-e4c2-493e-869f-b1e6ef0b95b0" containerName="dnsmasq-dns" containerID="cri-o://32fc9678983aa4377d171904a586e46c82b73dd67a76951324580708a6ac3ac8" gracePeriod=10 Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.155600 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.192446 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-qrlh2"] Feb 27 16:46:48 crc kubenswrapper[4751]: E0227 16:46:48.192788 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3478d8a7-c396-4368-81eb-00d79c45c5b7" containerName="keystone-db-sync" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.192804 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="3478d8a7-c396-4368-81eb-00d79c45c5b7" containerName="keystone-db-sync" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.192988 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="3478d8a7-c396-4368-81eb-00d79c45c5b7" containerName="keystone-db-sync" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.193751 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.209457 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-77dr5"] Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.210742 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-77dr5" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.212865 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.213072 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-hvwps" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.213222 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.213355 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.213720 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.255517 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-qrlh2"] Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.269465 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-77dr5"] Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.324851 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-dns-swift-storage-0\") pod \"dnsmasq-dns-5959f8865f-qrlh2\" (UID: \"781fc85b-adcf-4803-88fc-f85a0d025647\") " pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.324901 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-config\") pod \"dnsmasq-dns-5959f8865f-qrlh2\" (UID: \"781fc85b-adcf-4803-88fc-f85a0d025647\") " pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.324929 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-fernet-keys\") pod \"keystone-bootstrap-77dr5\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " pod="openstack/keystone-bootstrap-77dr5" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.324947 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-scripts\") pod \"keystone-bootstrap-77dr5\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " pod="openstack/keystone-bootstrap-77dr5" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.324962 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkr9w\" (UniqueName: \"kubernetes.io/projected/a31b9417-fc6b-4faa-bdab-b800ff4700b7-kube-api-access-qkr9w\") pod \"keystone-bootstrap-77dr5\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " pod="openstack/keystone-bootstrap-77dr5" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.325055 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-ovsdbserver-sb\") pod \"dnsmasq-dns-5959f8865f-qrlh2\" (UID: 
\"781fc85b-adcf-4803-88fc-f85a0d025647\") " pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.325078 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5b7x6\" (UniqueName: \"kubernetes.io/projected/781fc85b-adcf-4803-88fc-f85a0d025647-kube-api-access-5b7x6\") pod \"dnsmasq-dns-5959f8865f-qrlh2\" (UID: \"781fc85b-adcf-4803-88fc-f85a0d025647\") " pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.325126 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-config-data\") pod \"keystone-bootstrap-77dr5\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " pod="openstack/keystone-bootstrap-77dr5" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.325145 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-dns-svc\") pod \"dnsmasq-dns-5959f8865f-qrlh2\" (UID: \"781fc85b-adcf-4803-88fc-f85a0d025647\") " pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.325177 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-combined-ca-bundle\") pod \"keystone-bootstrap-77dr5\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " pod="openstack/keystone-bootstrap-77dr5" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.325199 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-ovsdbserver-nb\") pod \"dnsmasq-dns-5959f8865f-qrlh2\" (UID: \"781fc85b-adcf-4803-88fc-f85a0d025647\") " pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.325216 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-credential-keys\") pod \"keystone-bootstrap-77dr5\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " pod="openstack/keystone-bootstrap-77dr5" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.402245 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-fbdnl"] Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.403703 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-fbdnl" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.407760 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.410565 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-4c7l7" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.410780 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.426338 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-fbdnl"] Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.426468 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-dns-swift-storage-0\") pod \"dnsmasq-dns-5959f8865f-qrlh2\" (UID: \"781fc85b-adcf-4803-88fc-f85a0d025647\") " pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.426555 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-config\") pod \"dnsmasq-dns-5959f8865f-qrlh2\" (UID: \"781fc85b-adcf-4803-88fc-f85a0d025647\") " pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.426596 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-fernet-keys\") pod \"keystone-bootstrap-77dr5\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " pod="openstack/keystone-bootstrap-77dr5" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.426619 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-scripts\") pod \"keystone-bootstrap-77dr5\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " pod="openstack/keystone-bootstrap-77dr5" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.426639 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkr9w\" (UniqueName: \"kubernetes.io/projected/a31b9417-fc6b-4faa-bdab-b800ff4700b7-kube-api-access-qkr9w\") pod \"keystone-bootstrap-77dr5\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " pod="openstack/keystone-bootstrap-77dr5" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.426708 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-ovsdbserver-sb\") pod \"dnsmasq-dns-5959f8865f-qrlh2\" (UID: \"781fc85b-adcf-4803-88fc-f85a0d025647\") " pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.426731 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5b7x6\" (UniqueName: \"kubernetes.io/projected/781fc85b-adcf-4803-88fc-f85a0d025647-kube-api-access-5b7x6\") pod \"dnsmasq-dns-5959f8865f-qrlh2\" (UID: \"781fc85b-adcf-4803-88fc-f85a0d025647\") " pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.426803 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-config-data\") pod \"keystone-bootstrap-77dr5\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " pod="openstack/keystone-bootstrap-77dr5" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.426820 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-dns-svc\") pod \"dnsmasq-dns-5959f8865f-qrlh2\" (UID: \"781fc85b-adcf-4803-88fc-f85a0d025647\") " pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.426870 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-combined-ca-bundle\") pod \"keystone-bootstrap-77dr5\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " pod="openstack/keystone-bootstrap-77dr5" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.426893 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-ovsdbserver-nb\") pod \"dnsmasq-dns-5959f8865f-qrlh2\" (UID: \"781fc85b-adcf-4803-88fc-f85a0d025647\") " pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.426927 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-credential-keys\") pod \"keystone-bootstrap-77dr5\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " pod="openstack/keystone-bootstrap-77dr5" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.435605 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-ovsdbserver-sb\") pod \"dnsmasq-dns-5959f8865f-qrlh2\" (UID: \"781fc85b-adcf-4803-88fc-f85a0d025647\") " pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.436236 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-dns-swift-storage-0\") pod \"dnsmasq-dns-5959f8865f-qrlh2\" (UID: \"781fc85b-adcf-4803-88fc-f85a0d025647\") " pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.436921 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-config\") pod \"dnsmasq-dns-5959f8865f-qrlh2\" (UID: \"781fc85b-adcf-4803-88fc-f85a0d025647\") " pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.440517 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-fernet-keys\") pod \"keystone-bootstrap-77dr5\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " pod="openstack/keystone-bootstrap-77dr5" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.445015 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-dns-svc\") pod \"dnsmasq-dns-5959f8865f-qrlh2\" (UID: 
\"781fc85b-adcf-4803-88fc-f85a0d025647\") " pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.446314 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-scripts\") pod \"keystone-bootstrap-77dr5\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " pod="openstack/keystone-bootstrap-77dr5" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.446681 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-credential-keys\") pod \"keystone-bootstrap-77dr5\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " pod="openstack/keystone-bootstrap-77dr5" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.446887 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-ovsdbserver-nb\") pod \"dnsmasq-dns-5959f8865f-qrlh2\" (UID: \"781fc85b-adcf-4803-88fc-f85a0d025647\") " pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.451032 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-combined-ca-bundle\") pod \"keystone-bootstrap-77dr5\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " pod="openstack/keystone-bootstrap-77dr5" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.457915 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-config-data\") pod \"keystone-bootstrap-77dr5\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " pod="openstack/keystone-bootstrap-77dr5" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.469548 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5b7x6\" (UniqueName: \"kubernetes.io/projected/781fc85b-adcf-4803-88fc-f85a0d025647-kube-api-access-5b7x6\") pod \"dnsmasq-dns-5959f8865f-qrlh2\" (UID: \"781fc85b-adcf-4803-88fc-f85a0d025647\") " pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.470012 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkr9w\" (UniqueName: \"kubernetes.io/projected/a31b9417-fc6b-4faa-bdab-b800ff4700b7-kube-api-access-qkr9w\") pod \"keystone-bootstrap-77dr5\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " pod="openstack/keystone-bootstrap-77dr5" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.513988 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.528921 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c0298748-d6b6-46e7-a34d-381cf00a4aed-config\") pod \"neutron-db-sync-fbdnl\" (UID: \"c0298748-d6b6-46e7-a34d-381cf00a4aed\") " pod="openstack/neutron-db-sync-fbdnl" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.528979 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qzw9\" (UniqueName: \"kubernetes.io/projected/c0298748-d6b6-46e7-a34d-381cf00a4aed-kube-api-access-4qzw9\") pod \"neutron-db-sync-fbdnl\" (UID: \"c0298748-d6b6-46e7-a34d-381cf00a4aed\") " pod="openstack/neutron-db-sync-fbdnl" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.529008 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0298748-d6b6-46e7-a34d-381cf00a4aed-combined-ca-bundle\") pod \"neutron-db-sync-fbdnl\" (UID: \"c0298748-d6b6-46e7-a34d-381cf00a4aed\") " pod="openstack/neutron-db-sync-fbdnl" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.570058 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-6s84l"] Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.570919 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-qrlh2"] Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.570995 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-6s84l" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.576438 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-76bv8" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.576691 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.576820 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.597593 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-6s84l"] Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.615679 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-77dr5" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.623190 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-nq6b4"] Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.647537 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.675046 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c0298748-d6b6-46e7-a34d-381cf00a4aed-config\") pod \"neutron-db-sync-fbdnl\" (UID: \"c0298748-d6b6-46e7-a34d-381cf00a4aed\") " pod="openstack/neutron-db-sync-fbdnl" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.675151 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qzw9\" (UniqueName: \"kubernetes.io/projected/c0298748-d6b6-46e7-a34d-381cf00a4aed-kube-api-access-4qzw9\") pod \"neutron-db-sync-fbdnl\" (UID: \"c0298748-d6b6-46e7-a34d-381cf00a4aed\") " pod="openstack/neutron-db-sync-fbdnl" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.675200 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0298748-d6b6-46e7-a34d-381cf00a4aed-combined-ca-bundle\") pod \"neutron-db-sync-fbdnl\" (UID: \"c0298748-d6b6-46e7-a34d-381cf00a4aed\") " pod="openstack/neutron-db-sync-fbdnl" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.679709 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0298748-d6b6-46e7-a34d-381cf00a4aed-combined-ca-bundle\") pod \"neutron-db-sync-fbdnl\" (UID: \"c0298748-d6b6-46e7-a34d-381cf00a4aed\") " pod="openstack/neutron-db-sync-fbdnl" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.680968 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/c0298748-d6b6-46e7-a34d-381cf00a4aed-config\") pod \"neutron-db-sync-fbdnl\" (UID: \"c0298748-d6b6-46e7-a34d-381cf00a4aed\") " pod="openstack/neutron-db-sync-fbdnl" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.726104 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qzw9\" (UniqueName: \"kubernetes.io/projected/c0298748-d6b6-46e7-a34d-381cf00a4aed-kube-api-access-4qzw9\") pod \"neutron-db-sync-fbdnl\" (UID: \"c0298748-d6b6-46e7-a34d-381cf00a4aed\") " pod="openstack/neutron-db-sync-fbdnl" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.740890 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-mvlkh"] Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.741938 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-mvlkh" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.748824 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.748963 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.749147 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-78b27" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.766257 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-nq6b4"] Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.782198 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-dns-swift-storage-0\") pod \"dnsmasq-dns-58dd9ff6bc-nq6b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.782277 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-ovsdbserver-nb\") pod \"dnsmasq-dns-58dd9ff6bc-nq6b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.782301 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-ovsdbserver-sb\") pod \"dnsmasq-dns-58dd9ff6bc-nq6b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.782325 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fc5dm\" (UniqueName: \"kubernetes.io/projected/f306ad12-0f04-4414-8393-8ab5cc63c8b5-kube-api-access-fc5dm\") pod \"placement-db-sync-mvlkh\" (UID: \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\") " pod="openstack/placement-db-sync-mvlkh" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.782390 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f306ad12-0f04-4414-8393-8ab5cc63c8b5-config-data\") pod \"placement-db-sync-mvlkh\" (UID: \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\") " pod="openstack/placement-db-sync-mvlkh" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.782433 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-config\") pod \"dnsmasq-dns-58dd9ff6bc-nq6b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.782850 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-mvlkh"] Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.782466 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/f306ad12-0f04-4414-8393-8ab5cc63c8b5-scripts\") pod \"placement-db-sync-mvlkh\" (UID: \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\") " pod="openstack/placement-db-sync-mvlkh" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.783232 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6llpc\" (UniqueName: \"kubernetes.io/projected/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-kube-api-access-6llpc\") pod \"cinder-db-sync-6s84l\" (UID: \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " pod="openstack/cinder-db-sync-6s84l" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.783269 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f306ad12-0f04-4414-8393-8ab5cc63c8b5-logs\") pod \"placement-db-sync-mvlkh\" (UID: \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\") " pod="openstack/placement-db-sync-mvlkh" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.783293 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-db-sync-config-data\") pod \"cinder-db-sync-6s84l\" (UID: \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " pod="openstack/cinder-db-sync-6s84l" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.783346 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-combined-ca-bundle\") pod \"cinder-db-sync-6s84l\" (UID: \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " pod="openstack/cinder-db-sync-6s84l" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.786362 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-config-data\") pod \"cinder-db-sync-6s84l\" (UID: \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " pod="openstack/cinder-db-sync-6s84l" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.786397 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-dns-svc\") pod \"dnsmasq-dns-58dd9ff6bc-nq6b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.786522 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dmpn\" (UniqueName: \"kubernetes.io/projected/e72931dc-c81e-4f44-8e6b-72fab4e429b4-kube-api-access-2dmpn\") pod \"dnsmasq-dns-58dd9ff6bc-nq6b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.786544 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f306ad12-0f04-4414-8393-8ab5cc63c8b5-combined-ca-bundle\") pod \"placement-db-sync-mvlkh\" (UID: \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\") " pod="openstack/placement-db-sync-mvlkh" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.786566 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-etc-machine-id\") pod \"cinder-db-sync-6s84l\" (UID: \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " pod="openstack/cinder-db-sync-6s84l" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.786650 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-scripts\") pod \"cinder-db-sync-6s84l\" (UID: \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " pod="openstack/cinder-db-sync-6s84l" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.787353 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.794319 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-vgcdl"] Feb 27 16:46:48 crc kubenswrapper[4751]: E0227 16:46:48.794729 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7e69797-e4c2-493e-869f-b1e6ef0b95b0" containerName="init" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.794746 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7e69797-e4c2-493e-869f-b1e6ef0b95b0" containerName="init" Feb 27 16:46:48 crc kubenswrapper[4751]: E0227 16:46:48.794764 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7e69797-e4c2-493e-869f-b1e6ef0b95b0" containerName="dnsmasq-dns" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.794772 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7e69797-e4c2-493e-869f-b1e6ef0b95b0" containerName="dnsmasq-dns" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.794927 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7e69797-e4c2-493e-869f-b1e6ef0b95b0" containerName="dnsmasq-dns" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.795447 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-vgcdl" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.798140 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-p4xqw" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.798307 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.831144 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-vgcdl"] Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.834856 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-fbdnl" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.857392 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.859904 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.865807 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.866059 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.887278 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.887977 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-ovsdbserver-sb\") pod \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\" (UID: \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.888290 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-ovsdbserver-nb\") pod \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\" (UID: \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.888510 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-config\") pod \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\" (UID: \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.888583 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-dns-svc\") pod \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\" (UID: \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.888602 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pljvg\" (UniqueName: \"kubernetes.io/projected/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-kube-api-access-pljvg\") pod \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\" (UID: \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.888631 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-dns-swift-storage-0\") pod \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\" (UID: \"c7e69797-e4c2-493e-869f-b1e6ef0b95b0\") " Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.890385 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f306ad12-0f04-4414-8393-8ab5cc63c8b5-config-data\") pod \"placement-db-sync-mvlkh\" (UID: \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\") " pod="openstack/placement-db-sync-mvlkh" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.890477 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-config\") pod \"dnsmasq-dns-58dd9ff6bc-nq6b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.890549 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/f306ad12-0f04-4414-8393-8ab5cc63c8b5-scripts\") pod \"placement-db-sync-mvlkh\" (UID: \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\") " pod="openstack/placement-db-sync-mvlkh" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.890625 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80c6b259-7f53-44bc-9230-adeacd7d9cf6-combined-ca-bundle\") pod \"barbican-db-sync-vgcdl\" (UID: \"80c6b259-7f53-44bc-9230-adeacd7d9cf6\") " pod="openstack/barbican-db-sync-vgcdl" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.890647 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6llpc\" (UniqueName: \"kubernetes.io/projected/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-kube-api-access-6llpc\") pod \"cinder-db-sync-6s84l\" (UID: \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " pod="openstack/cinder-db-sync-6s84l" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.890682 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f306ad12-0f04-4414-8393-8ab5cc63c8b5-logs\") pod \"placement-db-sync-mvlkh\" (UID: \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\") " pod="openstack/placement-db-sync-mvlkh" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.890703 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-db-sync-config-data\") pod \"cinder-db-sync-6s84l\" (UID: \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " pod="openstack/cinder-db-sync-6s84l" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.890718 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-scripts\") pod \"ceilometer-0\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " pod="openstack/ceilometer-0" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.890770 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-run-httpd\") pod \"ceilometer-0\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " pod="openstack/ceilometer-0" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.890790 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-combined-ca-bundle\") pod \"cinder-db-sync-6s84l\" (UID: \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " pod="openstack/cinder-db-sync-6s84l" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.890815 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-config-data\") pod \"cinder-db-sync-6s84l\" (UID: \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " pod="openstack/cinder-db-sync-6s84l" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.890855 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-dns-svc\") pod \"dnsmasq-dns-58dd9ff6bc-nq6b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " 
pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.890888 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " pod="openstack/ceilometer-0" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.890932 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dmpn\" (UniqueName: \"kubernetes.io/projected/e72931dc-c81e-4f44-8e6b-72fab4e429b4-kube-api-access-2dmpn\") pod \"dnsmasq-dns-58dd9ff6bc-nq6b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.890951 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f306ad12-0f04-4414-8393-8ab5cc63c8b5-combined-ca-bundle\") pod \"placement-db-sync-mvlkh\" (UID: \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\") " pod="openstack/placement-db-sync-mvlkh" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.890967 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-log-httpd\") pod \"ceilometer-0\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " pod="openstack/ceilometer-0" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.891023 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-etc-machine-id\") pod \"cinder-db-sync-6s84l\" (UID: \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " pod="openstack/cinder-db-sync-6s84l" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.891041 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkn2q\" (UniqueName: \"kubernetes.io/projected/80c6b259-7f53-44bc-9230-adeacd7d9cf6-kube-api-access-fkn2q\") pod \"barbican-db-sync-vgcdl\" (UID: \"80c6b259-7f53-44bc-9230-adeacd7d9cf6\") " pod="openstack/barbican-db-sync-vgcdl" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.891276 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/80c6b259-7f53-44bc-9230-adeacd7d9cf6-db-sync-config-data\") pod \"barbican-db-sync-vgcdl\" (UID: \"80c6b259-7f53-44bc-9230-adeacd7d9cf6\") " pod="openstack/barbican-db-sync-vgcdl" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.891301 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " pod="openstack/ceilometer-0" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.891478 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-scripts\") pod \"cinder-db-sync-6s84l\" (UID: \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " pod="openstack/cinder-db-sync-6s84l" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 
16:46:48.891520 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-dns-swift-storage-0\") pod \"dnsmasq-dns-58dd9ff6bc-nq6b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.891542 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-config-data\") pod \"ceilometer-0\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " pod="openstack/ceilometer-0" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.891563 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-ovsdbserver-nb\") pod \"dnsmasq-dns-58dd9ff6bc-nq6b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.891689 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wjtm2\" (UniqueName: \"kubernetes.io/projected/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-kube-api-access-wjtm2\") pod \"ceilometer-0\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " pod="openstack/ceilometer-0" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.891709 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-ovsdbserver-sb\") pod \"dnsmasq-dns-58dd9ff6bc-nq6b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.892130 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-config\") pod \"dnsmasq-dns-58dd9ff6bc-nq6b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.892955 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f306ad12-0f04-4414-8393-8ab5cc63c8b5-logs\") pod \"placement-db-sync-mvlkh\" (UID: \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\") " pod="openstack/placement-db-sync-mvlkh" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.892998 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-dns-svc\") pod \"dnsmasq-dns-58dd9ff6bc-nq6b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.891732 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fc5dm\" (UniqueName: \"kubernetes.io/projected/f306ad12-0f04-4414-8393-8ab5cc63c8b5-kube-api-access-fc5dm\") pod \"placement-db-sync-mvlkh\" (UID: \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\") " pod="openstack/placement-db-sync-mvlkh" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.894582 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-config-data\") pod \"cinder-db-sync-6s84l\" (UID: \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " pod="openstack/cinder-db-sync-6s84l" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.896940 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f306ad12-0f04-4414-8393-8ab5cc63c8b5-scripts\") pod \"placement-db-sync-mvlkh\" (UID: \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\") " pod="openstack/placement-db-sync-mvlkh" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.897260 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-kube-api-access-pljvg" (OuterVolumeSpecName: "kube-api-access-pljvg") pod "c7e69797-e4c2-493e-869f-b1e6ef0b95b0" (UID: "c7e69797-e4c2-493e-869f-b1e6ef0b95b0"). InnerVolumeSpecName "kube-api-access-pljvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.897388 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-etc-machine-id\") pod \"cinder-db-sync-6s84l\" (UID: \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " pod="openstack/cinder-db-sync-6s84l" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.898263 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-ovsdbserver-sb\") pod \"dnsmasq-dns-58dd9ff6bc-nq6b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.898456 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-ovsdbserver-nb\") pod \"dnsmasq-dns-58dd9ff6bc-nq6b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.898775 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-dns-swift-storage-0\") pod \"dnsmasq-dns-58dd9ff6bc-nq6b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.900783 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-db-sync-config-data\") pod \"cinder-db-sync-6s84l\" (UID: \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " pod="openstack/cinder-db-sync-6s84l" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.903224 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-combined-ca-bundle\") pod \"cinder-db-sync-6s84l\" (UID: \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " pod="openstack/cinder-db-sync-6s84l" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.907116 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-scripts\") pod \"cinder-db-sync-6s84l\" (UID: 
\"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " pod="openstack/cinder-db-sync-6s84l" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.907285 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f306ad12-0f04-4414-8393-8ab5cc63c8b5-config-data\") pod \"placement-db-sync-mvlkh\" (UID: \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\") " pod="openstack/placement-db-sync-mvlkh" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.909513 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f306ad12-0f04-4414-8393-8ab5cc63c8b5-combined-ca-bundle\") pod \"placement-db-sync-mvlkh\" (UID: \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\") " pod="openstack/placement-db-sync-mvlkh" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.911185 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fc5dm\" (UniqueName: \"kubernetes.io/projected/f306ad12-0f04-4414-8393-8ab5cc63c8b5-kube-api-access-fc5dm\") pod \"placement-db-sync-mvlkh\" (UID: \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\") " pod="openstack/placement-db-sync-mvlkh" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.919955 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6llpc\" (UniqueName: \"kubernetes.io/projected/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-kube-api-access-6llpc\") pod \"cinder-db-sync-6s84l\" (UID: \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " pod="openstack/cinder-db-sync-6s84l" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.928898 4751 generic.go:334] "Generic (PLEG): container finished" podID="c7e69797-e4c2-493e-869f-b1e6ef0b95b0" containerID="32fc9678983aa4377d171904a586e46c82b73dd67a76951324580708a6ac3ac8" exitCode=0 Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.928945 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" event={"ID":"c7e69797-e4c2-493e-869f-b1e6ef0b95b0","Type":"ContainerDied","Data":"32fc9678983aa4377d171904a586e46c82b73dd67a76951324580708a6ac3ac8"} Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.928971 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" event={"ID":"c7e69797-e4c2-493e-869f-b1e6ef0b95b0","Type":"ContainerDied","Data":"f0e799d23dcafe7c62f996ceae32b84497f6c0c30c0de0244abcbf913d439b6b"} Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.928988 4751 scope.go:117] "RemoveContainer" containerID="32fc9678983aa4377d171904a586e46c82b73dd67a76951324580708a6ac3ac8" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.929090 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.933783 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dmpn\" (UniqueName: \"kubernetes.io/projected/e72931dc-c81e-4f44-8e6b-72fab4e429b4-kube-api-access-2dmpn\") pod \"dnsmasq-dns-58dd9ff6bc-nq6b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.967994 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-6s84l" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.974982 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c7e69797-e4c2-493e-869f-b1e6ef0b95b0" (UID: "c7e69797-e4c2-493e-869f-b1e6ef0b95b0"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.978331 4751 scope.go:117] "RemoveContainer" containerID="c7d5f7d113931c743c6730cd11dcf967da885f9386e349326facd1adb3ce5d4c" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.984783 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c7e69797-e4c2-493e-869f-b1e6ef0b95b0" (UID: "c7e69797-e4c2-493e-869f-b1e6ef0b95b0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.992832 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c7e69797-e4c2-493e-869f-b1e6ef0b95b0" (UID: "c7e69797-e4c2-493e-869f-b1e6ef0b95b0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.992898 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.995383 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-config-data\") pod \"ceilometer-0\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " pod="openstack/ceilometer-0" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.995854 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wjtm2\" (UniqueName: \"kubernetes.io/projected/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-kube-api-access-wjtm2\") pod \"ceilometer-0\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " pod="openstack/ceilometer-0" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.995940 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80c6b259-7f53-44bc-9230-adeacd7d9cf6-combined-ca-bundle\") pod \"barbican-db-sync-vgcdl\" (UID: \"80c6b259-7f53-44bc-9230-adeacd7d9cf6\") " pod="openstack/barbican-db-sync-vgcdl" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.995964 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-scripts\") pod \"ceilometer-0\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " pod="openstack/ceilometer-0" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.995992 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-run-httpd\") pod \"ceilometer-0\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " pod="openstack/ceilometer-0" 
Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.996039 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " pod="openstack/ceilometer-0" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.996063 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-log-httpd\") pod \"ceilometer-0\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " pod="openstack/ceilometer-0" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.996084 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkn2q\" (UniqueName: \"kubernetes.io/projected/80c6b259-7f53-44bc-9230-adeacd7d9cf6-kube-api-access-fkn2q\") pod \"barbican-db-sync-vgcdl\" (UID: \"80c6b259-7f53-44bc-9230-adeacd7d9cf6\") " pod="openstack/barbican-db-sync-vgcdl" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.996120 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/80c6b259-7f53-44bc-9230-adeacd7d9cf6-db-sync-config-data\") pod \"barbican-db-sync-vgcdl\" (UID: \"80c6b259-7f53-44bc-9230-adeacd7d9cf6\") " pod="openstack/barbican-db-sync-vgcdl" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.996139 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " pod="openstack/ceilometer-0" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.996190 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pljvg\" (UniqueName: \"kubernetes.io/projected/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-kube-api-access-pljvg\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.996201 4751 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.996210 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.996219 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.996986 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-run-httpd\") pod \"ceilometer-0\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " pod="openstack/ceilometer-0" Feb 27 16:46:48 crc kubenswrapper[4751]: I0227 16:46:48.998033 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-log-httpd\") pod \"ceilometer-0\" (UID: 
\"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " pod="openstack/ceilometer-0" Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.000695 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80c6b259-7f53-44bc-9230-adeacd7d9cf6-combined-ca-bundle\") pod \"barbican-db-sync-vgcdl\" (UID: \"80c6b259-7f53-44bc-9230-adeacd7d9cf6\") " pod="openstack/barbican-db-sync-vgcdl" Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.004038 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " pod="openstack/ceilometer-0" Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.004965 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-scripts\") pod \"ceilometer-0\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " pod="openstack/ceilometer-0" Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.013024 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " pod="openstack/ceilometer-0" Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.013838 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/80c6b259-7f53-44bc-9230-adeacd7d9cf6-db-sync-config-data\") pod \"barbican-db-sync-vgcdl\" (UID: \"80c6b259-7f53-44bc-9230-adeacd7d9cf6\") " pod="openstack/barbican-db-sync-vgcdl" Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.014036 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-config" (OuterVolumeSpecName: "config") pod "c7e69797-e4c2-493e-869f-b1e6ef0b95b0" (UID: "c7e69797-e4c2-493e-869f-b1e6ef0b95b0"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.016195 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wjtm2\" (UniqueName: \"kubernetes.io/projected/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-kube-api-access-wjtm2\") pod \"ceilometer-0\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " pod="openstack/ceilometer-0" Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.017386 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-config-data\") pod \"ceilometer-0\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " pod="openstack/ceilometer-0" Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.019834 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fkn2q\" (UniqueName: \"kubernetes.io/projected/80c6b259-7f53-44bc-9230-adeacd7d9cf6-kube-api-access-fkn2q\") pod \"barbican-db-sync-vgcdl\" (UID: \"80c6b259-7f53-44bc-9230-adeacd7d9cf6\") " pod="openstack/barbican-db-sync-vgcdl" Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.036691 4751 scope.go:117] "RemoveContainer" containerID="32fc9678983aa4377d171904a586e46c82b73dd67a76951324580708a6ac3ac8" Feb 27 16:46:49 crc kubenswrapper[4751]: E0227 16:46:49.037164 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32fc9678983aa4377d171904a586e46c82b73dd67a76951324580708a6ac3ac8\": container with ID starting with 32fc9678983aa4377d171904a586e46c82b73dd67a76951324580708a6ac3ac8 not found: ID does not exist" containerID="32fc9678983aa4377d171904a586e46c82b73dd67a76951324580708a6ac3ac8" Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.037207 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32fc9678983aa4377d171904a586e46c82b73dd67a76951324580708a6ac3ac8"} err="failed to get container status \"32fc9678983aa4377d171904a586e46c82b73dd67a76951324580708a6ac3ac8\": rpc error: code = NotFound desc = could not find container \"32fc9678983aa4377d171904a586e46c82b73dd67a76951324580708a6ac3ac8\": container with ID starting with 32fc9678983aa4377d171904a586e46c82b73dd67a76951324580708a6ac3ac8 not found: ID does not exist" Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.037229 4751 scope.go:117] "RemoveContainer" containerID="c7d5f7d113931c743c6730cd11dcf967da885f9386e349326facd1adb3ce5d4c" Feb 27 16:46:49 crc kubenswrapper[4751]: E0227 16:46:49.037536 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7d5f7d113931c743c6730cd11dcf967da885f9386e349326facd1adb3ce5d4c\": container with ID starting with c7d5f7d113931c743c6730cd11dcf967da885f9386e349326facd1adb3ce5d4c not found: ID does not exist" containerID="c7d5f7d113931c743c6730cd11dcf967da885f9386e349326facd1adb3ce5d4c" Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.037575 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7d5f7d113931c743c6730cd11dcf967da885f9386e349326facd1adb3ce5d4c"} err="failed to get container status \"c7d5f7d113931c743c6730cd11dcf967da885f9386e349326facd1adb3ce5d4c\": rpc error: code = NotFound desc = could not find container \"c7d5f7d113931c743c6730cd11dcf967da885f9386e349326facd1adb3ce5d4c\": container with ID starting with 
c7d5f7d113931c743c6730cd11dcf967da885f9386e349326facd1adb3ce5d4c not found: ID does not exist" Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.043272 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c7e69797-e4c2-493e-869f-b1e6ef0b95b0" (UID: "c7e69797-e4c2-493e-869f-b1e6ef0b95b0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.069048 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-mvlkh" Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.097249 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.097666 4751 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7e69797-e4c2-493e-869f-b1e6ef0b95b0-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.136965 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-vgcdl" Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.148536 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-77dr5"] Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.178383 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-qrlh2"] Feb 27 16:46:49 crc kubenswrapper[4751]: W0227 16:46:49.180014 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda31b9417_fc6b_4faa_bdab_b800ff4700b7.slice/crio-c5f117ef207dd087d3345e4d3f9d05575abd5d8c7de611778722e08d3fe4c076 WatchSource:0}: Error finding container c5f117ef207dd087d3345e4d3f9d05575abd5d8c7de611778722e08d3fe4c076: Status 404 returned error can't find the container with id c5f117ef207dd087d3345e4d3f9d05575abd5d8c7de611778722e08d3fe4c076 Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.185150 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:46:49 crc kubenswrapper[4751]: W0227 16:46:49.187887 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod781fc85b_adcf_4803_88fc_f85a0d025647.slice/crio-012d5ce6119fa8c80d415a5423a949d724b8e46327d024f9466ec5b2affb96ba WatchSource:0}: Error finding container 012d5ce6119fa8c80d415a5423a949d724b8e46327d024f9466ec5b2affb96ba: Status 404 returned error can't find the container with id 012d5ce6119fa8c80d415a5423a949d724b8e46327d024f9466ec5b2affb96ba Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.381719 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-fbdnl"] Feb 27 16:46:49 crc kubenswrapper[4751]: W0227 16:46:49.418963 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc0298748_d6b6_46e7_a34d_381cf00a4aed.slice/crio-3b0f2544ba25c990c4a533199139d0d6240126c076e17b4e713d915f7e086a8f WatchSource:0}: Error finding container 3b0f2544ba25c990c4a533199139d0d6240126c076e17b4e713d915f7e086a8f: Status 404 returned error can't find the container with id 3b0f2544ba25c990c4a533199139d0d6240126c076e17b4e713d915f7e086a8f Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.550379 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-4jwzm"] Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.560025 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-4jwzm"] Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.725757 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-nq6b4"] Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.732366 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-mvlkh"] Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.740494 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-6s84l"] Feb 27 16:46:49 crc kubenswrapper[4751]: W0227 16:46:49.742569 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode72931dc_c81e_4f44_8e6b_72fab4e429b4.slice/crio-b2b15e34fd57e45c1867b44456c2b3b70031ad1084d093537ea54ff48542faa2 WatchSource:0}: Error finding container b2b15e34fd57e45c1867b44456c2b3b70031ad1084d093537ea54ff48542faa2: Status 404 returned error can't find the container with id b2b15e34fd57e45c1867b44456c2b3b70031ad1084d093537ea54ff48542faa2 Feb 27 16:46:49 crc kubenswrapper[4751]: W0227 16:46:49.744177 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5af617b_32bc_43a9_a8e0_6bb1fec1b4df.slice/crio-895fe98bce37f73a009722c40ac87803ff592a2e5d6f63da4c04560fac1d0b8c WatchSource:0}: Error finding container 895fe98bce37f73a009722c40ac87803ff592a2e5d6f63da4c04560fac1d0b8c: Status 404 returned error can't find the container with id 895fe98bce37f73a009722c40ac87803ff592a2e5d6f63da4c04560fac1d0b8c Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.907992 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-vgcdl"] Feb 27 16:46:49 crc kubenswrapper[4751]: W0227 16:46:49.914792 4751 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod80c6b259_7f53_44bc_9230_adeacd7d9cf6.slice/crio-90670478599c3af368915df14832459d1b379ff067034f5d76556cccab9911e6 WatchSource:0}: Error finding container 90670478599c3af368915df14832459d1b379ff067034f5d76556cccab9911e6: Status 404 returned error can't find the container with id 90670478599c3af368915df14832459d1b379ff067034f5d76556cccab9911e6 Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.915017 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.943246 4751 generic.go:334] "Generic (PLEG): container finished" podID="781fc85b-adcf-4803-88fc-f85a0d025647" containerID="3423737aa7ca591c2e31c148e98dbe58f89e2369e00559c195d773a4db163263" exitCode=0 Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.943335 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" event={"ID":"781fc85b-adcf-4803-88fc-f85a0d025647","Type":"ContainerDied","Data":"3423737aa7ca591c2e31c148e98dbe58f89e2369e00559c195d773a4db163263"} Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.943423 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" event={"ID":"781fc85b-adcf-4803-88fc-f85a0d025647","Type":"ContainerStarted","Data":"012d5ce6119fa8c80d415a5423a949d724b8e46327d024f9466ec5b2affb96ba"} Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.950118 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-fbdnl" event={"ID":"c0298748-d6b6-46e7-a34d-381cf00a4aed","Type":"ContainerStarted","Data":"bb211c8ba53260784d253920b3c61cb5553b9d7118a710edd1a842fc4533fd91"} Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.950166 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-fbdnl" event={"ID":"c0298748-d6b6-46e7-a34d-381cf00a4aed","Type":"ContainerStarted","Data":"3b0f2544ba25c990c4a533199139d0d6240126c076e17b4e713d915f7e086a8f"} Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.957186 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-77dr5" event={"ID":"a31b9417-fc6b-4faa-bdab-b800ff4700b7","Type":"ContainerStarted","Data":"b7da592a3fd5743b745f60dd492e79d806ef290d43c2abc47fe9b2e656118c00"} Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.957210 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-77dr5" event={"ID":"a31b9417-fc6b-4faa-bdab-b800ff4700b7","Type":"ContainerStarted","Data":"c5f117ef207dd087d3345e4d3f9d05575abd5d8c7de611778722e08d3fe4c076"} Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.958729 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5bba51b8-4ef1-418b-86b4-59e9e52a6cac","Type":"ContainerStarted","Data":"b8ba57b901623b4cd8a77d53c8ca19d66768deae1afb88d5fb08bb1a75c853c8"} Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.959847 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-mvlkh" event={"ID":"f306ad12-0f04-4414-8393-8ab5cc63c8b5","Type":"ContainerStarted","Data":"5b32878aa739d511caeac604c3e367efefc6152d52123084c795c93eb94ed5f5"} Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.972490 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-vgcdl" 
event={"ID":"80c6b259-7f53-44bc-9230-adeacd7d9cf6","Type":"ContainerStarted","Data":"90670478599c3af368915df14832459d1b379ff067034f5d76556cccab9911e6"} Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.974265 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" event={"ID":"e72931dc-c81e-4f44-8e6b-72fab4e429b4","Type":"ContainerStarted","Data":"b2b15e34fd57e45c1867b44456c2b3b70031ad1084d093537ea54ff48542faa2"} Feb 27 16:46:49 crc kubenswrapper[4751]: I0227 16:46:49.979518 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-6s84l" event={"ID":"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df","Type":"ContainerStarted","Data":"895fe98bce37f73a009722c40ac87803ff592a2e5d6f63da4c04560fac1d0b8c"} Feb 27 16:46:50 crc kubenswrapper[4751]: I0227 16:46:50.022068 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-fbdnl" podStartSLOduration=2.022043987 podStartE2EDuration="2.022043987s" podCreationTimestamp="2026-02-27 16:46:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:46:49.98434434 +0000 UTC m=+1372.131358797" watchObservedRunningTime="2026-02-27 16:46:50.022043987 +0000 UTC m=+1372.169058424" Feb 27 16:46:50 crc kubenswrapper[4751]: I0227 16:46:50.050492 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-77dr5" podStartSLOduration=2.050473437 podStartE2EDuration="2.050473437s" podCreationTimestamp="2026-02-27 16:46:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:46:50.006588924 +0000 UTC m=+1372.153603391" watchObservedRunningTime="2026-02-27 16:46:50.050473437 +0000 UTC m=+1372.197487884" Feb 27 16:46:50 crc kubenswrapper[4751]: I0227 16:46:50.224560 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" Feb 27 16:46:50 crc kubenswrapper[4751]: I0227 16:46:50.331849 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5b7x6\" (UniqueName: \"kubernetes.io/projected/781fc85b-adcf-4803-88fc-f85a0d025647-kube-api-access-5b7x6\") pod \"781fc85b-adcf-4803-88fc-f85a0d025647\" (UID: \"781fc85b-adcf-4803-88fc-f85a0d025647\") " Feb 27 16:46:50 crc kubenswrapper[4751]: I0227 16:46:50.331937 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-dns-swift-storage-0\") pod \"781fc85b-adcf-4803-88fc-f85a0d025647\" (UID: \"781fc85b-adcf-4803-88fc-f85a0d025647\") " Feb 27 16:46:50 crc kubenswrapper[4751]: I0227 16:46:50.331980 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-ovsdbserver-nb\") pod \"781fc85b-adcf-4803-88fc-f85a0d025647\" (UID: \"781fc85b-adcf-4803-88fc-f85a0d025647\") " Feb 27 16:46:50 crc kubenswrapper[4751]: I0227 16:46:50.332064 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-dns-svc\") pod \"781fc85b-adcf-4803-88fc-f85a0d025647\" (UID: \"781fc85b-adcf-4803-88fc-f85a0d025647\") " Feb 27 16:46:50 crc kubenswrapper[4751]: I0227 16:46:50.332104 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-ovsdbserver-sb\") pod \"781fc85b-adcf-4803-88fc-f85a0d025647\" (UID: \"781fc85b-adcf-4803-88fc-f85a0d025647\") " Feb 27 16:46:50 crc kubenswrapper[4751]: I0227 16:46:50.332167 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-config\") pod \"781fc85b-adcf-4803-88fc-f85a0d025647\" (UID: \"781fc85b-adcf-4803-88fc-f85a0d025647\") " Feb 27 16:46:50 crc kubenswrapper[4751]: I0227 16:46:50.350652 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/781fc85b-adcf-4803-88fc-f85a0d025647-kube-api-access-5b7x6" (OuterVolumeSpecName: "kube-api-access-5b7x6") pod "781fc85b-adcf-4803-88fc-f85a0d025647" (UID: "781fc85b-adcf-4803-88fc-f85a0d025647"). InnerVolumeSpecName "kube-api-access-5b7x6". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:46:50 crc kubenswrapper[4751]: I0227 16:46:50.362791 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "781fc85b-adcf-4803-88fc-f85a0d025647" (UID: "781fc85b-adcf-4803-88fc-f85a0d025647"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:50 crc kubenswrapper[4751]: I0227 16:46:50.363976 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "781fc85b-adcf-4803-88fc-f85a0d025647" (UID: "781fc85b-adcf-4803-88fc-f85a0d025647"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:50 crc kubenswrapper[4751]: I0227 16:46:50.367433 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "781fc85b-adcf-4803-88fc-f85a0d025647" (UID: "781fc85b-adcf-4803-88fc-f85a0d025647"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:50 crc kubenswrapper[4751]: I0227 16:46:50.388183 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "781fc85b-adcf-4803-88fc-f85a0d025647" (UID: "781fc85b-adcf-4803-88fc-f85a0d025647"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:50 crc kubenswrapper[4751]: I0227 16:46:50.400330 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-config" (OuterVolumeSpecName: "config") pod "781fc85b-adcf-4803-88fc-f85a0d025647" (UID: "781fc85b-adcf-4803-88fc-f85a0d025647"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:46:50 crc kubenswrapper[4751]: I0227 16:46:50.435941 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5b7x6\" (UniqueName: \"kubernetes.io/projected/781fc85b-adcf-4803-88fc-f85a0d025647-kube-api-access-5b7x6\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:50 crc kubenswrapper[4751]: I0227 16:46:50.435969 4751 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:50 crc kubenswrapper[4751]: I0227 16:46:50.435979 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:50 crc kubenswrapper[4751]: I0227 16:46:50.435989 4751 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:50 crc kubenswrapper[4751]: I0227 16:46:50.435997 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:50 crc kubenswrapper[4751]: I0227 16:46:50.436006 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/781fc85b-adcf-4803-88fc-f85a0d025647-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:50 crc kubenswrapper[4751]: I0227 16:46:50.502154 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:46:50 crc kubenswrapper[4751]: I0227 16:46:50.565676 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7e69797-e4c2-493e-869f-b1e6ef0b95b0" path="/var/lib/kubelet/pods/c7e69797-e4c2-493e-869f-b1e6ef0b95b0/volumes" Feb 27 16:46:51 crc kubenswrapper[4751]: I0227 16:46:51.004751 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" 
event={"ID":"781fc85b-adcf-4803-88fc-f85a0d025647","Type":"ContainerDied","Data":"012d5ce6119fa8c80d415a5423a949d724b8e46327d024f9466ec5b2affb96ba"} Feb 27 16:46:51 crc kubenswrapper[4751]: I0227 16:46:51.004781 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5959f8865f-qrlh2" Feb 27 16:46:51 crc kubenswrapper[4751]: I0227 16:46:51.004812 4751 scope.go:117] "RemoveContainer" containerID="3423737aa7ca591c2e31c148e98dbe58f89e2369e00559c195d773a4db163263" Feb 27 16:46:51 crc kubenswrapper[4751]: I0227 16:46:51.009930 4751 generic.go:334] "Generic (PLEG): container finished" podID="e72931dc-c81e-4f44-8e6b-72fab4e429b4" containerID="a5cb05deb68bba72a0d13b365474189ae8b4505f7f791a7924ae015760498180" exitCode=0 Feb 27 16:46:51 crc kubenswrapper[4751]: I0227 16:46:51.010135 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" event={"ID":"e72931dc-c81e-4f44-8e6b-72fab4e429b4","Type":"ContainerDied","Data":"a5cb05deb68bba72a0d13b365474189ae8b4505f7f791a7924ae015760498180"} Feb 27 16:46:51 crc kubenswrapper[4751]: I0227 16:46:51.010341 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" event={"ID":"e72931dc-c81e-4f44-8e6b-72fab4e429b4","Type":"ContainerStarted","Data":"56831ba4dd315976c6290526492af98a99941ca79611171f8debd770c9e4e37c"} Feb 27 16:46:51 crc kubenswrapper[4751]: I0227 16:46:51.011595 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:46:51 crc kubenswrapper[4751]: I0227 16:46:51.044303 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" podStartSLOduration=3.044282525 podStartE2EDuration="3.044282525s" podCreationTimestamp="2026-02-27 16:46:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:46:51.041908711 +0000 UTC m=+1373.188923178" watchObservedRunningTime="2026-02-27 16:46:51.044282525 +0000 UTC m=+1373.191296972" Feb 27 16:46:51 crc kubenswrapper[4751]: I0227 16:46:51.084284 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-qrlh2"] Feb 27 16:46:51 crc kubenswrapper[4751]: I0227 16:46:51.107486 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-qrlh2"] Feb 27 16:46:52 crc kubenswrapper[4751]: I0227 16:46:52.537140 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="781fc85b-adcf-4803-88fc-f85a0d025647" path="/var/lib/kubelet/pods/781fc85b-adcf-4803-88fc-f85a0d025647/volumes" Feb 27 16:46:53 crc kubenswrapper[4751]: I0227 16:46:53.047856 4751 generic.go:334] "Generic (PLEG): container finished" podID="51f9db5a-ab58-4795-b09f-c2df5406c0cf" containerID="62783f7a506513bf1395f3c504deef531a9030d5bf80991511d067dd84478217" exitCode=0 Feb 27 16:46:53 crc kubenswrapper[4751]: I0227 16:46:53.048000 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-2pj4h" event={"ID":"51f9db5a-ab58-4795-b09f-c2df5406c0cf","Type":"ContainerDied","Data":"62783f7a506513bf1395f3c504deef531a9030d5bf80991511d067dd84478217"} Feb 27 16:46:53 crc kubenswrapper[4751]: I0227 16:46:53.492609 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-764c5664d7-4jwzm" podUID="c7e69797-e4c2-493e-869f-b1e6ef0b95b0" containerName="dnsmasq-dns" probeResult="failure" 
output="dial tcp 10.217.0.141:5353: i/o timeout" Feb 27 16:46:54 crc kubenswrapper[4751]: I0227 16:46:54.058180 4751 generic.go:334] "Generic (PLEG): container finished" podID="a31b9417-fc6b-4faa-bdab-b800ff4700b7" containerID="b7da592a3fd5743b745f60dd492e79d806ef290d43c2abc47fe9b2e656118c00" exitCode=0 Feb 27 16:46:54 crc kubenswrapper[4751]: I0227 16:46:54.058273 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-77dr5" event={"ID":"a31b9417-fc6b-4faa-bdab-b800ff4700b7","Type":"ContainerDied","Data":"b7da592a3fd5743b745f60dd492e79d806ef290d43c2abc47fe9b2e656118c00"} Feb 27 16:46:58 crc kubenswrapper[4751]: I0227 16:46:58.107922 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-2pj4h" event={"ID":"51f9db5a-ab58-4795-b09f-c2df5406c0cf","Type":"ContainerDied","Data":"d9f20201fc66c5e1178a792e1fe60392f6b35aff35c7e35455e08a1734427403"} Feb 27 16:46:58 crc kubenswrapper[4751]: I0227 16:46:58.108509 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d9f20201fc66c5e1178a792e1fe60392f6b35aff35c7e35455e08a1734427403" Feb 27 16:46:58 crc kubenswrapper[4751]: I0227 16:46:58.183190 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-2pj4h" Feb 27 16:46:58 crc kubenswrapper[4751]: I0227 16:46:58.305618 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/51f9db5a-ab58-4795-b09f-c2df5406c0cf-db-sync-config-data\") pod \"51f9db5a-ab58-4795-b09f-c2df5406c0cf\" (UID: \"51f9db5a-ab58-4795-b09f-c2df5406c0cf\") " Feb 27 16:46:58 crc kubenswrapper[4751]: I0227 16:46:58.305756 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51f9db5a-ab58-4795-b09f-c2df5406c0cf-combined-ca-bundle\") pod \"51f9db5a-ab58-4795-b09f-c2df5406c0cf\" (UID: \"51f9db5a-ab58-4795-b09f-c2df5406c0cf\") " Feb 27 16:46:58 crc kubenswrapper[4751]: I0227 16:46:58.305825 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7gfvz\" (UniqueName: \"kubernetes.io/projected/51f9db5a-ab58-4795-b09f-c2df5406c0cf-kube-api-access-7gfvz\") pod \"51f9db5a-ab58-4795-b09f-c2df5406c0cf\" (UID: \"51f9db5a-ab58-4795-b09f-c2df5406c0cf\") " Feb 27 16:46:58 crc kubenswrapper[4751]: I0227 16:46:58.305952 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51f9db5a-ab58-4795-b09f-c2df5406c0cf-config-data\") pod \"51f9db5a-ab58-4795-b09f-c2df5406c0cf\" (UID: \"51f9db5a-ab58-4795-b09f-c2df5406c0cf\") " Feb 27 16:46:58 crc kubenswrapper[4751]: I0227 16:46:58.313694 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51f9db5a-ab58-4795-b09f-c2df5406c0cf-kube-api-access-7gfvz" (OuterVolumeSpecName: "kube-api-access-7gfvz") pod "51f9db5a-ab58-4795-b09f-c2df5406c0cf" (UID: "51f9db5a-ab58-4795-b09f-c2df5406c0cf"). InnerVolumeSpecName "kube-api-access-7gfvz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:46:58 crc kubenswrapper[4751]: I0227 16:46:58.328273 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51f9db5a-ab58-4795-b09f-c2df5406c0cf-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "51f9db5a-ab58-4795-b09f-c2df5406c0cf" (UID: "51f9db5a-ab58-4795-b09f-c2df5406c0cf"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:46:58 crc kubenswrapper[4751]: I0227 16:46:58.335252 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51f9db5a-ab58-4795-b09f-c2df5406c0cf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "51f9db5a-ab58-4795-b09f-c2df5406c0cf" (UID: "51f9db5a-ab58-4795-b09f-c2df5406c0cf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:46:58 crc kubenswrapper[4751]: I0227 16:46:58.380713 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51f9db5a-ab58-4795-b09f-c2df5406c0cf-config-data" (OuterVolumeSpecName: "config-data") pod "51f9db5a-ab58-4795-b09f-c2df5406c0cf" (UID: "51f9db5a-ab58-4795-b09f-c2df5406c0cf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:46:58 crc kubenswrapper[4751]: I0227 16:46:58.407779 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51f9db5a-ab58-4795-b09f-c2df5406c0cf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:58 crc kubenswrapper[4751]: I0227 16:46:58.407811 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7gfvz\" (UniqueName: \"kubernetes.io/projected/51f9db5a-ab58-4795-b09f-c2df5406c0cf-kube-api-access-7gfvz\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:58 crc kubenswrapper[4751]: I0227 16:46:58.407823 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51f9db5a-ab58-4795-b09f-c2df5406c0cf-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:58 crc kubenswrapper[4751]: I0227 16:46:58.407831 4751 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/51f9db5a-ab58-4795-b09f-c2df5406c0cf-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:46:58 crc kubenswrapper[4751]: I0227 16:46:58.995639 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.077384 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-6px47"] Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.078616 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-698758b865-6px47" podUID="c68647f7-0ae5-4339-9449-b492f1e3b6b9" containerName="dnsmasq-dns" containerID="cri-o://baa27114786c2636b41d3933ceb7f4a7e53697beb788d8c6d80f5665a61be24e" gracePeriod=10 Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.125902 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-2pj4h" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.593139 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-bb8z8"] Feb 27 16:46:59 crc kubenswrapper[4751]: E0227 16:46:59.593827 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="781fc85b-adcf-4803-88fc-f85a0d025647" containerName="init" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.593841 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="781fc85b-adcf-4803-88fc-f85a0d025647" containerName="init" Feb 27 16:46:59 crc kubenswrapper[4751]: E0227 16:46:59.593861 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51f9db5a-ab58-4795-b09f-c2df5406c0cf" containerName="glance-db-sync" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.593867 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="51f9db5a-ab58-4795-b09f-c2df5406c0cf" containerName="glance-db-sync" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.594191 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="781fc85b-adcf-4803-88fc-f85a0d025647" containerName="init" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.594219 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="51f9db5a-ab58-4795-b09f-c2df5406c0cf" containerName="glance-db-sync" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.595144 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.618490 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-bb8z8"] Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.732190 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-bb8z8\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.732417 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-config\") pod \"dnsmasq-dns-785d8bcb8c-bb8z8\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.732522 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-bb8z8\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.732791 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-bb8z8\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.732836 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4nhv\" 
(UniqueName: \"kubernetes.io/projected/6c666273-be4c-420f-a8d0-858a389c124f-kube-api-access-d4nhv\") pod \"dnsmasq-dns-785d8bcb8c-bb8z8\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.732858 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-bb8z8\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.834391 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-bb8z8\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.834470 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-bb8z8\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.834496 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4nhv\" (UniqueName: \"kubernetes.io/projected/6c666273-be4c-420f-a8d0-858a389c124f-kube-api-access-d4nhv\") pod \"dnsmasq-dns-785d8bcb8c-bb8z8\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.834549 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-bb8z8\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.834614 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-config\") pod \"dnsmasq-dns-785d8bcb8c-bb8z8\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.834658 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-bb8z8\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.835587 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-bb8z8\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.835609 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-bb8z8\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.835713 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-bb8z8\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.835772 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-config\") pod \"dnsmasq-dns-785d8bcb8c-bb8z8\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.835857 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-bb8z8\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.876967 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4nhv\" (UniqueName: \"kubernetes.io/projected/6c666273-be4c-420f-a8d0-858a389c124f-kube-api-access-d4nhv\") pod \"dnsmasq-dns-785d8bcb8c-bb8z8\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:46:59 crc kubenswrapper[4751]: I0227 16:46:59.920102 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.145883 4751 generic.go:334] "Generic (PLEG): container finished" podID="c68647f7-0ae5-4339-9449-b492f1e3b6b9" containerID="baa27114786c2636b41d3933ceb7f4a7e53697beb788d8c6d80f5665a61be24e" exitCode=0 Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.146072 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-6px47" event={"ID":"c68647f7-0ae5-4339-9449-b492f1e3b6b9","Type":"ContainerDied","Data":"baa27114786c2636b41d3933ceb7f4a7e53697beb788d8c6d80f5665a61be24e"} Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.645646 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.654555 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.658677 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.659001 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-n4zl7" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.661340 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.665113 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.759740 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cdb4412f-6028-4cd6-9a52-ca11a99a6474-config-data\") pod \"glance-default-external-api-0\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.759824 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkjch\" (UniqueName: \"kubernetes.io/projected/cdb4412f-6028-4cd6-9a52-ca11a99a6474-kube-api-access-lkjch\") pod \"glance-default-external-api-0\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.759894 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cdb4412f-6028-4cd6-9a52-ca11a99a6474-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.759915 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cdb4412f-6028-4cd6-9a52-ca11a99a6474-scripts\") pod \"glance-default-external-api-0\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.759940 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.759975 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cdb4412f-6028-4cd6-9a52-ca11a99a6474-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.760365 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cdb4412f-6028-4cd6-9a52-ca11a99a6474-logs\") pod \"glance-default-external-api-0\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " 
pod="openstack/glance-default-external-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.822163 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.823631 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.826749 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.833175 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.861594 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cdb4412f-6028-4cd6-9a52-ca11a99a6474-config-data\") pod \"glance-default-external-api-0\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.861664 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkjch\" (UniqueName: \"kubernetes.io/projected/cdb4412f-6028-4cd6-9a52-ca11a99a6474-kube-api-access-lkjch\") pod \"glance-default-external-api-0\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.861712 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cdb4412f-6028-4cd6-9a52-ca11a99a6474-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.861754 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cdb4412f-6028-4cd6-9a52-ca11a99a6474-scripts\") pod \"glance-default-external-api-0\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.861777 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.861793 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cdb4412f-6028-4cd6-9a52-ca11a99a6474-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.861868 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cdb4412f-6028-4cd6-9a52-ca11a99a6474-logs\") pod \"glance-default-external-api-0\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.862443 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"logs\" (UniqueName: \"kubernetes.io/empty-dir/cdb4412f-6028-4cd6-9a52-ca11a99a6474-logs\") pod \"glance-default-external-api-0\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.863281 4751 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-external-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.863845 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cdb4412f-6028-4cd6-9a52-ca11a99a6474-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.867757 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cdb4412f-6028-4cd6-9a52-ca11a99a6474-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.870988 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cdb4412f-6028-4cd6-9a52-ca11a99a6474-scripts\") pod \"glance-default-external-api-0\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.872493 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cdb4412f-6028-4cd6-9a52-ca11a99a6474-config-data\") pod \"glance-default-external-api-0\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.881871 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkjch\" (UniqueName: \"kubernetes.io/projected/cdb4412f-6028-4cd6-9a52-ca11a99a6474-kube-api-access-lkjch\") pod \"glance-default-external-api-0\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.914661 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.963120 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e68bea53-8a9e-4229-bd71-1c6aeca5202c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.963170 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e68bea53-8a9e-4229-bd71-1c6aeca5202c-logs\") pod 
\"glance-default-internal-api-0\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.963266 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.963296 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e68bea53-8a9e-4229-bd71-1c6aeca5202c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.963336 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e68bea53-8a9e-4229-bd71-1c6aeca5202c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.963368 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnjxp\" (UniqueName: \"kubernetes.io/projected/e68bea53-8a9e-4229-bd71-1c6aeca5202c-kube-api-access-mnjxp\") pod \"glance-default-internal-api-0\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.963388 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e68bea53-8a9e-4229-bd71-1c6aeca5202c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:00 crc kubenswrapper[4751]: I0227 16:47:00.980934 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 27 16:47:01 crc kubenswrapper[4751]: I0227 16:47:01.066355 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e68bea53-8a9e-4229-bd71-1c6aeca5202c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:01 crc kubenswrapper[4751]: I0227 16:47:01.066867 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e68bea53-8a9e-4229-bd71-1c6aeca5202c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:01 crc kubenswrapper[4751]: I0227 16:47:01.066916 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnjxp\" (UniqueName: \"kubernetes.io/projected/e68bea53-8a9e-4229-bd71-1c6aeca5202c-kube-api-access-mnjxp\") pod \"glance-default-internal-api-0\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:01 crc kubenswrapper[4751]: I0227 16:47:01.066938 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e68bea53-8a9e-4229-bd71-1c6aeca5202c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:01 crc kubenswrapper[4751]: I0227 16:47:01.066954 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e68bea53-8a9e-4229-bd71-1c6aeca5202c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:01 crc kubenswrapper[4751]: I0227 16:47:01.067126 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e68bea53-8a9e-4229-bd71-1c6aeca5202c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:01 crc kubenswrapper[4751]: I0227 16:47:01.067169 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e68bea53-8a9e-4229-bd71-1c6aeca5202c-logs\") pod \"glance-default-internal-api-0\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:01 crc kubenswrapper[4751]: I0227 16:47:01.067305 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:01 crc kubenswrapper[4751]: I0227 16:47:01.067762 4751 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Feb 27 16:47:01 crc 
kubenswrapper[4751]: I0227 16:47:01.067940 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e68bea53-8a9e-4229-bd71-1c6aeca5202c-logs\") pod \"glance-default-internal-api-0\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:01 crc kubenswrapper[4751]: I0227 16:47:01.070710 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e68bea53-8a9e-4229-bd71-1c6aeca5202c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:01 crc kubenswrapper[4751]: I0227 16:47:01.072316 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e68bea53-8a9e-4229-bd71-1c6aeca5202c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:01 crc kubenswrapper[4751]: I0227 16:47:01.082277 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e68bea53-8a9e-4229-bd71-1c6aeca5202c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:01 crc kubenswrapper[4751]: I0227 16:47:01.093621 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnjxp\" (UniqueName: \"kubernetes.io/projected/e68bea53-8a9e-4229-bd71-1c6aeca5202c-kube-api-access-mnjxp\") pod \"glance-default-internal-api-0\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:01 crc kubenswrapper[4751]: I0227 16:47:01.103186 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:01 crc kubenswrapper[4751]: I0227 16:47:01.151418 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 27 16:47:01 crc kubenswrapper[4751]: E0227 16:47:01.590131 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified" Feb 27 16:47:01 crc kubenswrapper[4751]: E0227 16:47:01.590284 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n56bh9h66fh586h677h66fh6h559h79h54ch64bh55h576h54h677h55h565h69h5dchf6h5b9h4h9dh66ch5ddh5d8h5c6h65ch6bhd4h648h544q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wjtm2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(5bba51b8-4ef1-418b-86b4-59e9e52a6cac): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 27 16:47:02 crc kubenswrapper[4751]: I0227 16:47:02.591557 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 27 16:47:02 crc kubenswrapper[4751]: I0227 16:47:02.654521 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 27 16:47:02 crc kubenswrapper[4751]: I0227 16:47:02.732778 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-6px47" podUID="c68647f7-0ae5-4339-9449-b492f1e3b6b9" 
containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.119:5353: connect: connection refused" Feb 27 16:47:07 crc kubenswrapper[4751]: I0227 16:47:07.733071 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-6px47" podUID="c68647f7-0ae5-4339-9449-b492f1e3b6b9" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.119:5353: connect: connection refused" Feb 27 16:47:09 crc kubenswrapper[4751]: I0227 16:47:09.807737 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-77dr5" Feb 27 16:47:09 crc kubenswrapper[4751]: I0227 16:47:09.843627 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-config-data\") pod \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " Feb 27 16:47:09 crc kubenswrapper[4751]: I0227 16:47:09.843743 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-scripts\") pod \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " Feb 27 16:47:09 crc kubenswrapper[4751]: I0227 16:47:09.843865 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-combined-ca-bundle\") pod \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " Feb 27 16:47:09 crc kubenswrapper[4751]: I0227 16:47:09.843915 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-fernet-keys\") pod \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " Feb 27 16:47:09 crc kubenswrapper[4751]: I0227 16:47:09.843984 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qkr9w\" (UniqueName: \"kubernetes.io/projected/a31b9417-fc6b-4faa-bdab-b800ff4700b7-kube-api-access-qkr9w\") pod \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " Feb 27 16:47:09 crc kubenswrapper[4751]: I0227 16:47:09.844019 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-credential-keys\") pod \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\" (UID: \"a31b9417-fc6b-4faa-bdab-b800ff4700b7\") " Feb 27 16:47:09 crc kubenswrapper[4751]: I0227 16:47:09.851710 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "a31b9417-fc6b-4faa-bdab-b800ff4700b7" (UID: "a31b9417-fc6b-4faa-bdab-b800ff4700b7"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:09 crc kubenswrapper[4751]: I0227 16:47:09.852520 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-scripts" (OuterVolumeSpecName: "scripts") pod "a31b9417-fc6b-4faa-bdab-b800ff4700b7" (UID: "a31b9417-fc6b-4faa-bdab-b800ff4700b7"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:09 crc kubenswrapper[4751]: I0227 16:47:09.856842 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31b9417-fc6b-4faa-bdab-b800ff4700b7-kube-api-access-qkr9w" (OuterVolumeSpecName: "kube-api-access-qkr9w") pod "a31b9417-fc6b-4faa-bdab-b800ff4700b7" (UID: "a31b9417-fc6b-4faa-bdab-b800ff4700b7"). InnerVolumeSpecName "kube-api-access-qkr9w". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:47:09 crc kubenswrapper[4751]: I0227 16:47:09.868542 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "a31b9417-fc6b-4faa-bdab-b800ff4700b7" (UID: "a31b9417-fc6b-4faa-bdab-b800ff4700b7"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:09 crc kubenswrapper[4751]: I0227 16:47:09.893039 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a31b9417-fc6b-4faa-bdab-b800ff4700b7" (UID: "a31b9417-fc6b-4faa-bdab-b800ff4700b7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:09 crc kubenswrapper[4751]: I0227 16:47:09.894753 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-config-data" (OuterVolumeSpecName: "config-data") pod "a31b9417-fc6b-4faa-bdab-b800ff4700b7" (UID: "a31b9417-fc6b-4faa-bdab-b800ff4700b7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:09 crc kubenswrapper[4751]: I0227 16:47:09.945775 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:09 crc kubenswrapper[4751]: I0227 16:47:09.945836 4751 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-fernet-keys\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:09 crc kubenswrapper[4751]: I0227 16:47:09.945848 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qkr9w\" (UniqueName: \"kubernetes.io/projected/a31b9417-fc6b-4faa-bdab-b800ff4700b7-kube-api-access-qkr9w\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:09 crc kubenswrapper[4751]: I0227 16:47:09.945861 4751 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-credential-keys\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:09 crc kubenswrapper[4751]: I0227 16:47:09.945874 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:09 crc kubenswrapper[4751]: I0227 16:47:09.945885 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a31b9417-fc6b-4faa-bdab-b800ff4700b7-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:10 crc kubenswrapper[4751]: I0227 16:47:10.223548 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/keystone-bootstrap-77dr5" event={"ID":"a31b9417-fc6b-4faa-bdab-b800ff4700b7","Type":"ContainerDied","Data":"c5f117ef207dd087d3345e4d3f9d05575abd5d8c7de611778722e08d3fe4c076"} Feb 27 16:47:10 crc kubenswrapper[4751]: I0227 16:47:10.223617 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-77dr5" Feb 27 16:47:10 crc kubenswrapper[4751]: I0227 16:47:10.223635 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c5f117ef207dd087d3345e4d3f9d05575abd5d8c7de611778722e08d3fe4c076" Feb 27 16:47:10 crc kubenswrapper[4751]: E0227 16:47:10.361064 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Feb 27 16:47:10 crc kubenswrapper[4751]: E0227 16:47:10.361227 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fkn2q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-vgcdl_openstack(80c6b259-7f53-44bc-9230-adeacd7d9cf6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 27 16:47:10 crc kubenswrapper[4751]: E0227 16:47:10.362769 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-vgcdl" podUID="80c6b259-7f53-44bc-9230-adeacd7d9cf6" Feb 27 16:47:10 crc kubenswrapper[4751]: I0227 16:47:10.888317 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-77dr5"] Feb 27 16:47:10 crc kubenswrapper[4751]: I0227 16:47:10.896768 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-77dr5"] Feb 27 16:47:10 crc kubenswrapper[4751]: I0227 
16:47:10.993485 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-x847w"] Feb 27 16:47:10 crc kubenswrapper[4751]: E0227 16:47:10.994044 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a31b9417-fc6b-4faa-bdab-b800ff4700b7" containerName="keystone-bootstrap" Feb 27 16:47:10 crc kubenswrapper[4751]: I0227 16:47:10.994067 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="a31b9417-fc6b-4faa-bdab-b800ff4700b7" containerName="keystone-bootstrap" Feb 27 16:47:10 crc kubenswrapper[4751]: I0227 16:47:10.994300 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="a31b9417-fc6b-4faa-bdab-b800ff4700b7" containerName="keystone-bootstrap" Feb 27 16:47:10 crc kubenswrapper[4751]: I0227 16:47:10.994980 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-x847w" Feb 27 16:47:10 crc kubenswrapper[4751]: I0227 16:47:10.996941 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Feb 27 16:47:10 crc kubenswrapper[4751]: I0227 16:47:10.997189 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Feb 27 16:47:10 crc kubenswrapper[4751]: I0227 16:47:10.997302 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Feb 27 16:47:10 crc kubenswrapper[4751]: I0227 16:47:10.997523 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Feb 27 16:47:10 crc kubenswrapper[4751]: I0227 16:47:10.997980 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-hvwps" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.003883 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-x847w"] Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.068463 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-credential-keys\") pod \"keystone-bootstrap-x847w\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " pod="openstack/keystone-bootstrap-x847w" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.068520 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-combined-ca-bundle\") pod \"keystone-bootstrap-x847w\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " pod="openstack/keystone-bootstrap-x847w" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.068560 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-scripts\") pod \"keystone-bootstrap-x847w\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " pod="openstack/keystone-bootstrap-x847w" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.068596 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-config-data\") pod \"keystone-bootstrap-x847w\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " pod="openstack/keystone-bootstrap-x847w" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.068883 4751 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6v9xd\" (UniqueName: \"kubernetes.io/projected/907bed97-620e-441c-9539-b8e62c988b52-kube-api-access-6v9xd\") pod \"keystone-bootstrap-x847w\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " pod="openstack/keystone-bootstrap-x847w" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.068985 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-fernet-keys\") pod \"keystone-bootstrap-x847w\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " pod="openstack/keystone-bootstrap-x847w" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.170735 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-scripts\") pod \"keystone-bootstrap-x847w\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " pod="openstack/keystone-bootstrap-x847w" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.170796 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-config-data\") pod \"keystone-bootstrap-x847w\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " pod="openstack/keystone-bootstrap-x847w" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.170875 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6v9xd\" (UniqueName: \"kubernetes.io/projected/907bed97-620e-441c-9539-b8e62c988b52-kube-api-access-6v9xd\") pod \"keystone-bootstrap-x847w\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " pod="openstack/keystone-bootstrap-x847w" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.170899 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-fernet-keys\") pod \"keystone-bootstrap-x847w\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " pod="openstack/keystone-bootstrap-x847w" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.170932 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-credential-keys\") pod \"keystone-bootstrap-x847w\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " pod="openstack/keystone-bootstrap-x847w" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.170956 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-combined-ca-bundle\") pod \"keystone-bootstrap-x847w\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " pod="openstack/keystone-bootstrap-x847w" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.188394 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-credential-keys\") pod \"keystone-bootstrap-x847w\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " pod="openstack/keystone-bootstrap-x847w" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.188548 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-config-data\") pod \"keystone-bootstrap-x847w\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " pod="openstack/keystone-bootstrap-x847w" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.188870 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-combined-ca-bundle\") pod \"keystone-bootstrap-x847w\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " pod="openstack/keystone-bootstrap-x847w" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.189322 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-fernet-keys\") pod \"keystone-bootstrap-x847w\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " pod="openstack/keystone-bootstrap-x847w" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.190023 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-scripts\") pod \"keystone-bootstrap-x847w\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " pod="openstack/keystone-bootstrap-x847w" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.190693 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6v9xd\" (UniqueName: \"kubernetes.io/projected/907bed97-620e-441c-9539-b8e62c988b52-kube-api-access-6v9xd\") pod \"keystone-bootstrap-x847w\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " pod="openstack/keystone-bootstrap-x847w" Feb 27 16:47:11 crc kubenswrapper[4751]: E0227 16:47:11.235244 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-vgcdl" podUID="80c6b259-7f53-44bc-9230-adeacd7d9cf6" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.314276 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-x847w" Feb 27 16:47:11 crc kubenswrapper[4751]: E0227 16:47:11.466680 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Feb 27 16:47:11 crc kubenswrapper[4751]: E0227 16:47:11.466823 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6llpc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-6s84l_openstack(f5af617b-32bc-43a9-a8e0-6bb1fec1b4df): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 27 16:47:11 crc kubenswrapper[4751]: E0227 16:47:11.468551 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-6s84l" podUID="f5af617b-32bc-43a9-a8e0-6bb1fec1b4df" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.588641 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-6px47" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.680597 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-64shn\" (UniqueName: \"kubernetes.io/projected/c68647f7-0ae5-4339-9449-b492f1e3b6b9-kube-api-access-64shn\") pod \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\" (UID: \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\") " Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.680993 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-ovsdbserver-sb\") pod \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\" (UID: \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\") " Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.681078 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-config\") pod \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\" (UID: \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\") " Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.681098 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-dns-svc\") pod \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\" (UID: \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\") " Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.681181 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-ovsdbserver-nb\") pod \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\" (UID: \"c68647f7-0ae5-4339-9449-b492f1e3b6b9\") " Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.690040 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c68647f7-0ae5-4339-9449-b492f1e3b6b9-kube-api-access-64shn" (OuterVolumeSpecName: "kube-api-access-64shn") pod "c68647f7-0ae5-4339-9449-b492f1e3b6b9" (UID: "c68647f7-0ae5-4339-9449-b492f1e3b6b9"). InnerVolumeSpecName "kube-api-access-64shn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.721066 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c68647f7-0ae5-4339-9449-b492f1e3b6b9" (UID: "c68647f7-0ae5-4339-9449-b492f1e3b6b9"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.725129 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-config" (OuterVolumeSpecName: "config") pod "c68647f7-0ae5-4339-9449-b492f1e3b6b9" (UID: "c68647f7-0ae5-4339-9449-b492f1e3b6b9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.727825 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c68647f7-0ae5-4339-9449-b492f1e3b6b9" (UID: "c68647f7-0ae5-4339-9449-b492f1e3b6b9"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.728991 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c68647f7-0ae5-4339-9449-b492f1e3b6b9" (UID: "c68647f7-0ae5-4339-9449-b492f1e3b6b9"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.783683 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.783730 4751 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.783743 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.783758 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-64shn\" (UniqueName: \"kubernetes.io/projected/c68647f7-0ae5-4339-9449-b492f1e3b6b9-kube-api-access-64shn\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.783773 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c68647f7-0ae5-4339-9449-b492f1e3b6b9-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:11 crc kubenswrapper[4751]: I0227 16:47:11.942875 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-bb8z8"] Feb 27 16:47:12 crc kubenswrapper[4751]: I0227 16:47:12.049159 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 27 16:47:12 crc kubenswrapper[4751]: W0227 16:47:12.061503 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcdb4412f_6028_4cd6_9a52_ca11a99a6474.slice/crio-eedb7ce55e762c201822bd4739718321fe574a4a53fd88cd8dac91a4cf2a5f61 WatchSource:0}: Error finding container eedb7ce55e762c201822bd4739718321fe574a4a53fd88cd8dac91a4cf2a5f61: Status 404 returned error can't find the container with id eedb7ce55e762c201822bd4739718321fe574a4a53fd88cd8dac91a4cf2a5f61 Feb 27 16:47:12 crc kubenswrapper[4751]: I0227 16:47:12.255818 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-6px47" event={"ID":"c68647f7-0ae5-4339-9449-b492f1e3b6b9","Type":"ContainerDied","Data":"af563aa031123af53af22f855ede0fdfab46ea7eaf69ed76dc0093648e4c64ef"} Feb 27 16:47:12 crc kubenswrapper[4751]: I0227 16:47:12.255865 4751 scope.go:117] "RemoveContainer" containerID="baa27114786c2636b41d3933ceb7f4a7e53697beb788d8c6d80f5665a61be24e" Feb 27 16:47:12 crc kubenswrapper[4751]: I0227 16:47:12.255969 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-6px47" Feb 27 16:47:12 crc kubenswrapper[4751]: I0227 16:47:12.263826 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-mvlkh" event={"ID":"f306ad12-0f04-4414-8393-8ab5cc63c8b5","Type":"ContainerStarted","Data":"8b4d0f68e6478929263c6f04f2a21d33b3783a4100e3d80a0dd129b6b5595f24"} Feb 27 16:47:12 crc kubenswrapper[4751]: I0227 16:47:12.267675 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cdb4412f-6028-4cd6-9a52-ca11a99a6474","Type":"ContainerStarted","Data":"eedb7ce55e762c201822bd4739718321fe574a4a53fd88cd8dac91a4cf2a5f61"} Feb 27 16:47:12 crc kubenswrapper[4751]: I0227 16:47:12.271456 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5bba51b8-4ef1-418b-86b4-59e9e52a6cac","Type":"ContainerStarted","Data":"f27e5eaa429e78791aca833690688958b2eac18b004d3f3ef116330e59f33271"} Feb 27 16:47:12 crc kubenswrapper[4751]: I0227 16:47:12.273771 4751 generic.go:334] "Generic (PLEG): container finished" podID="6c666273-be4c-420f-a8d0-858a389c124f" containerID="4a28bd1c9e91e873f5ab7b4cc2bf511329afb34afd48fbaf670010994dd19b7f" exitCode=0 Feb 27 16:47:12 crc kubenswrapper[4751]: I0227 16:47:12.274469 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" event={"ID":"6c666273-be4c-420f-a8d0-858a389c124f","Type":"ContainerDied","Data":"4a28bd1c9e91e873f5ab7b4cc2bf511329afb34afd48fbaf670010994dd19b7f"} Feb 27 16:47:12 crc kubenswrapper[4751]: I0227 16:47:12.274504 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" event={"ID":"6c666273-be4c-420f-a8d0-858a389c124f","Type":"ContainerStarted","Data":"cfc104fad94858dea6049b1c29b14b3c1e5f586e307fe9be9084edc53fe4618c"} Feb 27 16:47:12 crc kubenswrapper[4751]: E0227 16:47:12.275025 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-6s84l" podUID="f5af617b-32bc-43a9-a8e0-6bb1fec1b4df" Feb 27 16:47:12 crc kubenswrapper[4751]: I0227 16:47:12.287362 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-mvlkh" podStartSLOduration=2.572150592 podStartE2EDuration="24.287332608s" podCreationTimestamp="2026-02-27 16:46:48 +0000 UTC" firstStartedPulling="2026-02-27 16:46:49.745183481 +0000 UTC m=+1371.892197928" lastFinishedPulling="2026-02-27 16:47:11.460365467 +0000 UTC m=+1393.607379944" observedRunningTime="2026-02-27 16:47:12.281882333 +0000 UTC m=+1394.428896770" watchObservedRunningTime="2026-02-27 16:47:12.287332608 +0000 UTC m=+1394.434347055" Feb 27 16:47:12 crc kubenswrapper[4751]: I0227 16:47:12.318501 4751 scope.go:117] "RemoveContainer" containerID="0deb72468fc72b012f613e715480ffe7175bc19240a8fea4df9f160b1950ec71" Feb 27 16:47:12 crc kubenswrapper[4751]: I0227 16:47:12.377591 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-6px47"] Feb 27 16:47:12 crc kubenswrapper[4751]: I0227 16:47:12.392930 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-698758b865-6px47"] Feb 27 16:47:12 crc kubenswrapper[4751]: I0227 16:47:12.401525 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/keystone-bootstrap-x847w"] Feb 27 16:47:12 crc kubenswrapper[4751]: I0227 16:47:12.415353 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 27 16:47:12 crc kubenswrapper[4751]: I0227 16:47:12.531058 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31b9417-fc6b-4faa-bdab-b800ff4700b7" path="/var/lib/kubelet/pods/a31b9417-fc6b-4faa-bdab-b800ff4700b7/volumes" Feb 27 16:47:12 crc kubenswrapper[4751]: I0227 16:47:12.531620 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c68647f7-0ae5-4339-9449-b492f1e3b6b9" path="/var/lib/kubelet/pods/c68647f7-0ae5-4339-9449-b492f1e3b6b9/volumes" Feb 27 16:47:13 crc kubenswrapper[4751]: I0227 16:47:13.291577 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" event={"ID":"6c666273-be4c-420f-a8d0-858a389c124f","Type":"ContainerStarted","Data":"92015060d4409dfac614cc5618cb83a61eb9abea5e2973f2f7b0a339ca2fd00d"} Feb 27 16:47:13 crc kubenswrapper[4751]: I0227 16:47:13.292035 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:47:13 crc kubenswrapper[4751]: I0227 16:47:13.295047 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e68bea53-8a9e-4229-bd71-1c6aeca5202c","Type":"ContainerStarted","Data":"ace8b9e1535375486a4e18cb42f1a8a93b8e9743b99b38035afbc3ba992b727c"} Feb 27 16:47:13 crc kubenswrapper[4751]: I0227 16:47:13.295083 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e68bea53-8a9e-4229-bd71-1c6aeca5202c","Type":"ContainerStarted","Data":"9a6124526d253156a204e074cb67ba51e44a4a5214ad5ebed10a5f07e9758ff5"} Feb 27 16:47:13 crc kubenswrapper[4751]: I0227 16:47:13.297396 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cdb4412f-6028-4cd6-9a52-ca11a99a6474","Type":"ContainerStarted","Data":"32c2285a2d8cbe283cea02e3b0a2cbd78b6caafd06a9554df4c7322136c2c845"} Feb 27 16:47:13 crc kubenswrapper[4751]: I0227 16:47:13.308218 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-x847w" event={"ID":"907bed97-620e-441c-9539-b8e62c988b52","Type":"ContainerStarted","Data":"eceb2023fadf1f97b7f28b5686582ed93f0d37ed691285c381c8b92e120a66bd"} Feb 27 16:47:13 crc kubenswrapper[4751]: I0227 16:47:13.308649 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-x847w" event={"ID":"907bed97-620e-441c-9539-b8e62c988b52","Type":"ContainerStarted","Data":"ea5f5e85f50e3f13640886f2bb2beb2cf18caebe778ea894ea6d1d7935d2029c"} Feb 27 16:47:13 crc kubenswrapper[4751]: I0227 16:47:13.314102 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" podStartSLOduration=14.314079345 podStartE2EDuration="14.314079345s" podCreationTimestamp="2026-02-27 16:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:47:13.30789747 +0000 UTC m=+1395.454911937" watchObservedRunningTime="2026-02-27 16:47:13.314079345 +0000 UTC m=+1395.461093812" Feb 27 16:47:13 crc kubenswrapper[4751]: I0227 16:47:13.324031 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-x847w" podStartSLOduration=3.32401538 
podStartE2EDuration="3.32401538s" podCreationTimestamp="2026-02-27 16:47:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:47:13.322910811 +0000 UTC m=+1395.469925258" watchObservedRunningTime="2026-02-27 16:47:13.32401538 +0000 UTC m=+1395.471029827" Feb 27 16:47:14 crc kubenswrapper[4751]: I0227 16:47:14.318351 4751 generic.go:334] "Generic (PLEG): container finished" podID="c0298748-d6b6-46e7-a34d-381cf00a4aed" containerID="bb211c8ba53260784d253920b3c61cb5553b9d7118a710edd1a842fc4533fd91" exitCode=0 Feb 27 16:47:14 crc kubenswrapper[4751]: I0227 16:47:14.318463 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-fbdnl" event={"ID":"c0298748-d6b6-46e7-a34d-381cf00a4aed","Type":"ContainerDied","Data":"bb211c8ba53260784d253920b3c61cb5553b9d7118a710edd1a842fc4533fd91"} Feb 27 16:47:14 crc kubenswrapper[4751]: I0227 16:47:14.320628 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e68bea53-8a9e-4229-bd71-1c6aeca5202c","Type":"ContainerStarted","Data":"e589592682b434ec4626756437d451c2078f149571f5abd15279eba34e38c5a2"} Feb 27 16:47:14 crc kubenswrapper[4751]: I0227 16:47:14.320741 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="e68bea53-8a9e-4229-bd71-1c6aeca5202c" containerName="glance-log" containerID="cri-o://ace8b9e1535375486a4e18cb42f1a8a93b8e9743b99b38035afbc3ba992b727c" gracePeriod=30 Feb 27 16:47:14 crc kubenswrapper[4751]: I0227 16:47:14.320816 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="e68bea53-8a9e-4229-bd71-1c6aeca5202c" containerName="glance-httpd" containerID="cri-o://e589592682b434ec4626756437d451c2078f149571f5abd15279eba34e38c5a2" gracePeriod=30 Feb 27 16:47:14 crc kubenswrapper[4751]: I0227 16:47:14.322646 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cdb4412f-6028-4cd6-9a52-ca11a99a6474","Type":"ContainerStarted","Data":"71dddc943cb8aa8207b8dc4f0989862707a111f6ce8dd6f9aa0991901be797e9"} Feb 27 16:47:14 crc kubenswrapper[4751]: I0227 16:47:14.322798 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="cdb4412f-6028-4cd6-9a52-ca11a99a6474" containerName="glance-log" containerID="cri-o://32c2285a2d8cbe283cea02e3b0a2cbd78b6caafd06a9554df4c7322136c2c845" gracePeriod=30 Feb 27 16:47:14 crc kubenswrapper[4751]: I0227 16:47:14.322890 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="cdb4412f-6028-4cd6-9a52-ca11a99a6474" containerName="glance-httpd" containerID="cri-o://71dddc943cb8aa8207b8dc4f0989862707a111f6ce8dd6f9aa0991901be797e9" gracePeriod=30 Feb 27 16:47:14 crc kubenswrapper[4751]: I0227 16:47:14.367927 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=15.367907776 podStartE2EDuration="15.367907776s" podCreationTimestamp="2026-02-27 16:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:47:14.367347971 +0000 UTC m=+1396.514362418" watchObservedRunningTime="2026-02-27 16:47:14.367907776 +0000 UTC 
m=+1396.514922223" Feb 27 16:47:14 crc kubenswrapper[4751]: I0227 16:47:14.397858 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=15.397837886 podStartE2EDuration="15.397837886s" podCreationTimestamp="2026-02-27 16:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:47:14.391742923 +0000 UTC m=+1396.538757370" watchObservedRunningTime="2026-02-27 16:47:14.397837886 +0000 UTC m=+1396.544852333" Feb 27 16:47:15 crc kubenswrapper[4751]: I0227 16:47:15.365769 4751 generic.go:334] "Generic (PLEG): container finished" podID="cdb4412f-6028-4cd6-9a52-ca11a99a6474" containerID="71dddc943cb8aa8207b8dc4f0989862707a111f6ce8dd6f9aa0991901be797e9" exitCode=0 Feb 27 16:47:15 crc kubenswrapper[4751]: I0227 16:47:15.366080 4751 generic.go:334] "Generic (PLEG): container finished" podID="cdb4412f-6028-4cd6-9a52-ca11a99a6474" containerID="32c2285a2d8cbe283cea02e3b0a2cbd78b6caafd06a9554df4c7322136c2c845" exitCode=143 Feb 27 16:47:15 crc kubenswrapper[4751]: I0227 16:47:15.366159 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cdb4412f-6028-4cd6-9a52-ca11a99a6474","Type":"ContainerDied","Data":"71dddc943cb8aa8207b8dc4f0989862707a111f6ce8dd6f9aa0991901be797e9"} Feb 27 16:47:15 crc kubenswrapper[4751]: I0227 16:47:15.366200 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cdb4412f-6028-4cd6-9a52-ca11a99a6474","Type":"ContainerDied","Data":"32c2285a2d8cbe283cea02e3b0a2cbd78b6caafd06a9554df4c7322136c2c845"} Feb 27 16:47:15 crc kubenswrapper[4751]: I0227 16:47:15.378341 4751 generic.go:334] "Generic (PLEG): container finished" podID="e68bea53-8a9e-4229-bd71-1c6aeca5202c" containerID="e589592682b434ec4626756437d451c2078f149571f5abd15279eba34e38c5a2" exitCode=0 Feb 27 16:47:15 crc kubenswrapper[4751]: I0227 16:47:15.378377 4751 generic.go:334] "Generic (PLEG): container finished" podID="e68bea53-8a9e-4229-bd71-1c6aeca5202c" containerID="ace8b9e1535375486a4e18cb42f1a8a93b8e9743b99b38035afbc3ba992b727c" exitCode=143 Feb 27 16:47:15 crc kubenswrapper[4751]: I0227 16:47:15.378560 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e68bea53-8a9e-4229-bd71-1c6aeca5202c","Type":"ContainerDied","Data":"e589592682b434ec4626756437d451c2078f149571f5abd15279eba34e38c5a2"} Feb 27 16:47:15 crc kubenswrapper[4751]: I0227 16:47:15.378626 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e68bea53-8a9e-4229-bd71-1c6aeca5202c","Type":"ContainerDied","Data":"ace8b9e1535375486a4e18cb42f1a8a93b8e9743b99b38035afbc3ba992b727c"} Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.389544 4751 generic.go:334] "Generic (PLEG): container finished" podID="f306ad12-0f04-4414-8393-8ab5cc63c8b5" containerID="8b4d0f68e6478929263c6f04f2a21d33b3783a4100e3d80a0dd129b6b5595f24" exitCode=0 Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.389628 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-mvlkh" event={"ID":"f306ad12-0f04-4414-8393-8ab5cc63c8b5","Type":"ContainerDied","Data":"8b4d0f68e6478929263c6f04f2a21d33b3783a4100e3d80a0dd129b6b5595f24"} Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.392287 4751 generic.go:334] "Generic (PLEG): container 
finished" podID="907bed97-620e-441c-9539-b8e62c988b52" containerID="eceb2023fadf1f97b7f28b5686582ed93f0d37ed691285c381c8b92e120a66bd" exitCode=0 Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.392318 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-x847w" event={"ID":"907bed97-620e-441c-9539-b8e62c988b52","Type":"ContainerDied","Data":"eceb2023fadf1f97b7f28b5686582ed93f0d37ed691285c381c8b92e120a66bd"} Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.778774 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.786391 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-fbdnl" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.807355 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.890343 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c0298748-d6b6-46e7-a34d-381cf00a4aed-config\") pod \"c0298748-d6b6-46e7-a34d-381cf00a4aed\" (UID: \"c0298748-d6b6-46e7-a34d-381cf00a4aed\") " Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.890500 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4qzw9\" (UniqueName: \"kubernetes.io/projected/c0298748-d6b6-46e7-a34d-381cf00a4aed-kube-api-access-4qzw9\") pod \"c0298748-d6b6-46e7-a34d-381cf00a4aed\" (UID: \"c0298748-d6b6-46e7-a34d-381cf00a4aed\") " Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.890550 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.890596 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0298748-d6b6-46e7-a34d-381cf00a4aed-combined-ca-bundle\") pod \"c0298748-d6b6-46e7-a34d-381cf00a4aed\" (UID: \"c0298748-d6b6-46e7-a34d-381cf00a4aed\") " Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.890681 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e68bea53-8a9e-4229-bd71-1c6aeca5202c-httpd-run\") pod \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.890740 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e68bea53-8a9e-4229-bd71-1c6aeca5202c-logs\") pod \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.890796 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnjxp\" (UniqueName: \"kubernetes.io/projected/e68bea53-8a9e-4229-bd71-1c6aeca5202c-kube-api-access-mnjxp\") pod \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.890852 4751 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.890925 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lkjch\" (UniqueName: \"kubernetes.io/projected/cdb4412f-6028-4cd6-9a52-ca11a99a6474-kube-api-access-lkjch\") pod \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.891093 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e68bea53-8a9e-4229-bd71-1c6aeca5202c-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "e68bea53-8a9e-4229-bd71-1c6aeca5202c" (UID: "e68bea53-8a9e-4229-bd71-1c6aeca5202c"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.891207 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e68bea53-8a9e-4229-bd71-1c6aeca5202c-logs" (OuterVolumeSpecName: "logs") pod "e68bea53-8a9e-4229-bd71-1c6aeca5202c" (UID: "e68bea53-8a9e-4229-bd71-1c6aeca5202c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.890962 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e68bea53-8a9e-4229-bd71-1c6aeca5202c-config-data\") pod \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.891450 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cdb4412f-6028-4cd6-9a52-ca11a99a6474-scripts\") pod \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.891478 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cdb4412f-6028-4cd6-9a52-ca11a99a6474-logs\") pod \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.891600 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e68bea53-8a9e-4229-bd71-1c6aeca5202c-combined-ca-bundle\") pod \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.891670 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cdb4412f-6028-4cd6-9a52-ca11a99a6474-config-data\") pod \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.891694 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e68bea53-8a9e-4229-bd71-1c6aeca5202c-scripts\") pod \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\" (UID: \"e68bea53-8a9e-4229-bd71-1c6aeca5202c\") " Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.891773 4751 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cdb4412f-6028-4cd6-9a52-ca11a99a6474-httpd-run\") pod \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.891799 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cdb4412f-6028-4cd6-9a52-ca11a99a6474-combined-ca-bundle\") pod \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\" (UID: \"cdb4412f-6028-4cd6-9a52-ca11a99a6474\") " Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.892125 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cdb4412f-6028-4cd6-9a52-ca11a99a6474-logs" (OuterVolumeSpecName: "logs") pod "cdb4412f-6028-4cd6-9a52-ca11a99a6474" (UID: "cdb4412f-6028-4cd6-9a52-ca11a99a6474"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.892502 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cdb4412f-6028-4cd6-9a52-ca11a99a6474-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "cdb4412f-6028-4cd6-9a52-ca11a99a6474" (UID: "cdb4412f-6028-4cd6-9a52-ca11a99a6474"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.895165 4751 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cdb4412f-6028-4cd6-9a52-ca11a99a6474-httpd-run\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.896063 4751 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e68bea53-8a9e-4229-bd71-1c6aeca5202c-httpd-run\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.896077 4751 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e68bea53-8a9e-4229-bd71-1c6aeca5202c-logs\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.896085 4751 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cdb4412f-6028-4cd6-9a52-ca11a99a6474-logs\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.898318 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "e68bea53-8a9e-4229-bd71-1c6aeca5202c" (UID: "e68bea53-8a9e-4229-bd71-1c6aeca5202c"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.898434 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e68bea53-8a9e-4229-bd71-1c6aeca5202c-kube-api-access-mnjxp" (OuterVolumeSpecName: "kube-api-access-mnjxp") pod "e68bea53-8a9e-4229-bd71-1c6aeca5202c" (UID: "e68bea53-8a9e-4229-bd71-1c6aeca5202c"). InnerVolumeSpecName "kube-api-access-mnjxp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.898503 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0298748-d6b6-46e7-a34d-381cf00a4aed-kube-api-access-4qzw9" (OuterVolumeSpecName: "kube-api-access-4qzw9") pod "c0298748-d6b6-46e7-a34d-381cf00a4aed" (UID: "c0298748-d6b6-46e7-a34d-381cf00a4aed"). InnerVolumeSpecName "kube-api-access-4qzw9". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.900805 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e68bea53-8a9e-4229-bd71-1c6aeca5202c-scripts" (OuterVolumeSpecName: "scripts") pod "e68bea53-8a9e-4229-bd71-1c6aeca5202c" (UID: "e68bea53-8a9e-4229-bd71-1c6aeca5202c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.909335 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cdb4412f-6028-4cd6-9a52-ca11a99a6474-kube-api-access-lkjch" (OuterVolumeSpecName: "kube-api-access-lkjch") pod "cdb4412f-6028-4cd6-9a52-ca11a99a6474" (UID: "cdb4412f-6028-4cd6-9a52-ca11a99a6474"). InnerVolumeSpecName "kube-api-access-lkjch". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.909675 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "cdb4412f-6028-4cd6-9a52-ca11a99a6474" (UID: "cdb4412f-6028-4cd6-9a52-ca11a99a6474"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.909308 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdb4412f-6028-4cd6-9a52-ca11a99a6474-scripts" (OuterVolumeSpecName: "scripts") pod "cdb4412f-6028-4cd6-9a52-ca11a99a6474" (UID: "cdb4412f-6028-4cd6-9a52-ca11a99a6474"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.920619 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0298748-d6b6-46e7-a34d-381cf00a4aed-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c0298748-d6b6-46e7-a34d-381cf00a4aed" (UID: "c0298748-d6b6-46e7-a34d-381cf00a4aed"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.921161 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0298748-d6b6-46e7-a34d-381cf00a4aed-config" (OuterVolumeSpecName: "config") pod "c0298748-d6b6-46e7-a34d-381cf00a4aed" (UID: "c0298748-d6b6-46e7-a34d-381cf00a4aed"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.926943 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e68bea53-8a9e-4229-bd71-1c6aeca5202c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e68bea53-8a9e-4229-bd71-1c6aeca5202c" (UID: "e68bea53-8a9e-4229-bd71-1c6aeca5202c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.927585 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdb4412f-6028-4cd6-9a52-ca11a99a6474-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cdb4412f-6028-4cd6-9a52-ca11a99a6474" (UID: "cdb4412f-6028-4cd6-9a52-ca11a99a6474"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.948338 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e68bea53-8a9e-4229-bd71-1c6aeca5202c-config-data" (OuterVolumeSpecName: "config-data") pod "e68bea53-8a9e-4229-bd71-1c6aeca5202c" (UID: "e68bea53-8a9e-4229-bd71-1c6aeca5202c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.953985 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdb4412f-6028-4cd6-9a52-ca11a99a6474-config-data" (OuterVolumeSpecName: "config-data") pod "cdb4412f-6028-4cd6-9a52-ca11a99a6474" (UID: "cdb4412f-6028-4cd6-9a52-ca11a99a6474"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.997661 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cdb4412f-6028-4cd6-9a52-ca11a99a6474-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.997869 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e68bea53-8a9e-4229-bd71-1c6aeca5202c-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.997925 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cdb4412f-6028-4cd6-9a52-ca11a99a6474-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.997983 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/c0298748-d6b6-46e7-a34d-381cf00a4aed-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.998033 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4qzw9\" (UniqueName: \"kubernetes.io/projected/c0298748-d6b6-46e7-a34d-381cf00a4aed-kube-api-access-4qzw9\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.998109 4751 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.998160 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0298748-d6b6-46e7-a34d-381cf00a4aed-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.998215 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnjxp\" (UniqueName: \"kubernetes.io/projected/e68bea53-8a9e-4229-bd71-1c6aeca5202c-kube-api-access-mnjxp\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.998275 4751 
reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.998324 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lkjch\" (UniqueName: \"kubernetes.io/projected/cdb4412f-6028-4cd6-9a52-ca11a99a6474-kube-api-access-lkjch\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.998371 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e68bea53-8a9e-4229-bd71-1c6aeca5202c-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.998437 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cdb4412f-6028-4cd6-9a52-ca11a99a6474-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:16 crc kubenswrapper[4751]: I0227 16:47:16.998488 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e68bea53-8a9e-4229-bd71-1c6aeca5202c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.032601 4751 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.037919 4751 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.100329 4751 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.100369 4751 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.406766 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cdb4412f-6028-4cd6-9a52-ca11a99a6474","Type":"ContainerDied","Data":"eedb7ce55e762c201822bd4739718321fe574a4a53fd88cd8dac91a4cf2a5f61"} Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.406842 4751 scope.go:117] "RemoveContainer" containerID="71dddc943cb8aa8207b8dc4f0989862707a111f6ce8dd6f9aa0991901be797e9" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.406797 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.411258 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5bba51b8-4ef1-418b-86b4-59e9e52a6cac","Type":"ContainerStarted","Data":"34e35acdba06ac6dd6527ab73950f94be57c05bc13e712a4e351b33f56a39698"} Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.413296 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-fbdnl" event={"ID":"c0298748-d6b6-46e7-a34d-381cf00a4aed","Type":"ContainerDied","Data":"3b0f2544ba25c990c4a533199139d0d6240126c076e17b4e713d915f7e086a8f"} Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.413335 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b0f2544ba25c990c4a533199139d0d6240126c076e17b4e713d915f7e086a8f" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.413429 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-fbdnl" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.419571 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.419586 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e68bea53-8a9e-4229-bd71-1c6aeca5202c","Type":"ContainerDied","Data":"9a6124526d253156a204e074cb67ba51e44a4a5214ad5ebed10a5f07e9758ff5"} Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.453439 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.459416 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.460916 4751 scope.go:117] "RemoveContainer" containerID="32c2285a2d8cbe283cea02e3b0a2cbd78b6caafd06a9554df4c7322136c2c845" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.472320 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.496503 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.517584 4751 scope.go:117] "RemoveContainer" containerID="e589592682b434ec4626756437d451c2078f149571f5abd15279eba34e38c5a2" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.517933 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Feb 27 16:47:17 crc kubenswrapper[4751]: E0227 16:47:17.518364 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e68bea53-8a9e-4229-bd71-1c6aeca5202c" containerName="glance-httpd" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.518383 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="e68bea53-8a9e-4229-bd71-1c6aeca5202c" containerName="glance-httpd" Feb 27 16:47:17 crc kubenswrapper[4751]: E0227 16:47:17.518418 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdb4412f-6028-4cd6-9a52-ca11a99a6474" containerName="glance-httpd" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.518428 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdb4412f-6028-4cd6-9a52-ca11a99a6474" containerName="glance-httpd" Feb 27 
16:47:17 crc kubenswrapper[4751]: E0227 16:47:17.518442 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e68bea53-8a9e-4229-bd71-1c6aeca5202c" containerName="glance-log" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.518451 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="e68bea53-8a9e-4229-bd71-1c6aeca5202c" containerName="glance-log" Feb 27 16:47:17 crc kubenswrapper[4751]: E0227 16:47:17.518464 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0298748-d6b6-46e7-a34d-381cf00a4aed" containerName="neutron-db-sync" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.518472 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0298748-d6b6-46e7-a34d-381cf00a4aed" containerName="neutron-db-sync" Feb 27 16:47:17 crc kubenswrapper[4751]: E0227 16:47:17.518482 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c68647f7-0ae5-4339-9449-b492f1e3b6b9" containerName="init" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.518490 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="c68647f7-0ae5-4339-9449-b492f1e3b6b9" containerName="init" Feb 27 16:47:17 crc kubenswrapper[4751]: E0227 16:47:17.518501 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdb4412f-6028-4cd6-9a52-ca11a99a6474" containerName="glance-log" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.518509 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdb4412f-6028-4cd6-9a52-ca11a99a6474" containerName="glance-log" Feb 27 16:47:17 crc kubenswrapper[4751]: E0227 16:47:17.518522 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c68647f7-0ae5-4339-9449-b492f1e3b6b9" containerName="dnsmasq-dns" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.518531 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="c68647f7-0ae5-4339-9449-b492f1e3b6b9" containerName="dnsmasq-dns" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.518752 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdb4412f-6028-4cd6-9a52-ca11a99a6474" containerName="glance-log" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.518795 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="c68647f7-0ae5-4339-9449-b492f1e3b6b9" containerName="dnsmasq-dns" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.518848 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="e68bea53-8a9e-4229-bd71-1c6aeca5202c" containerName="glance-log" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.518862 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="e68bea53-8a9e-4229-bd71-1c6aeca5202c" containerName="glance-httpd" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.518881 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdb4412f-6028-4cd6-9a52-ca11a99a6474" containerName="glance-httpd" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.518893 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0298748-d6b6-46e7-a34d-381cf00a4aed" containerName="neutron-db-sync" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.520000 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.525480 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.531844 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.531855 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.532015 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-n4zl7" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.532310 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.550525 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.552533 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.557824 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.558040 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.589388 4751 scope.go:117] "RemoveContainer" containerID="ace8b9e1535375486a4e18cb42f1a8a93b8e9743b99b38035afbc3ba992b727c" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.594030 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.608365 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/efb8a6cc-5def-4be4-82e1-b20f19d1c800-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.608481 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xn768\" (UniqueName: \"kubernetes.io/projected/efb8a6cc-5def-4be4-82e1-b20f19d1c800-kube-api-access-xn768\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.608537 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.608572 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: 
\"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.608596 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efb8a6cc-5def-4be4-82e1-b20f19d1c800-logs\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.608620 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.608637 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-scripts\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.608678 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-config-data\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.710077 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.710142 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xn768\" (UniqueName: \"kubernetes.io/projected/efb8a6cc-5def-4be4-82e1-b20f19d1c800-kube-api-access-xn768\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.710167 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whjlg\" (UniqueName: \"kubernetes.io/projected/f864f57e-a41a-4e30-9293-8ede35ea08dd-kube-api-access-whjlg\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.710192 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f864f57e-a41a-4e30-9293-8ede35ea08dd-logs\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.710210 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.710243 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.710269 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.710284 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.710306 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efb8a6cc-5def-4be4-82e1-b20f19d1c800-logs\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.710329 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.710349 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.710368 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-scripts\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.710392 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f864f57e-a41a-4e30-9293-8ede35ea08dd-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.710431 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.710464 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-config-data\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.710497 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/efb8a6cc-5def-4be4-82e1-b20f19d1c800-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.710991 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/efb8a6cc-5def-4be4-82e1-b20f19d1c800-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.712240 4751 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.714485 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efb8a6cc-5def-4be4-82e1-b20f19d1c800-logs\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.716984 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-scripts\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.717050 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.717125 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.719625 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-config-data\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") 
" pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.738771 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.749725 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xn768\" (UniqueName: \"kubernetes.io/projected/efb8a6cc-5def-4be4-82e1-b20f19d1c800-kube-api-access-xn768\") pod \"glance-default-external-api-0\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.812621 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f864f57e-a41a-4e30-9293-8ede35ea08dd-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.812690 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.812785 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.812839 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whjlg\" (UniqueName: \"kubernetes.io/projected/f864f57e-a41a-4e30-9293-8ede35ea08dd-kube-api-access-whjlg\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.812869 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.812891 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f864f57e-a41a-4e30-9293-8ede35ea08dd-logs\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.812945 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.812997 4751 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.813050 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f864f57e-a41a-4e30-9293-8ede35ea08dd-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.813739 4751 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.816360 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f864f57e-a41a-4e30-9293-8ede35ea08dd-logs\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.823839 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.831003 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.831745 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.847112 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whjlg\" (UniqueName: \"kubernetes.io/projected/f864f57e-a41a-4e30-9293-8ede35ea08dd-kube-api-access-whjlg\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.852275 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.853030 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.868696 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.910219 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-x847w" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.915776 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 27 16:47:17 crc kubenswrapper[4751]: I0227 16:47:17.997057 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-mvlkh" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.019711 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6v9xd\" (UniqueName: \"kubernetes.io/projected/907bed97-620e-441c-9539-b8e62c988b52-kube-api-access-6v9xd\") pod \"907bed97-620e-441c-9539-b8e62c988b52\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.019977 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-scripts\") pod \"907bed97-620e-441c-9539-b8e62c988b52\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.020009 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-combined-ca-bundle\") pod \"907bed97-620e-441c-9539-b8e62c988b52\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.020053 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-fernet-keys\") pod \"907bed97-620e-441c-9539-b8e62c988b52\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.020107 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-credential-keys\") pod \"907bed97-620e-441c-9539-b8e62c988b52\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.020128 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-config-data\") pod \"907bed97-620e-441c-9539-b8e62c988b52\" (UID: \"907bed97-620e-441c-9539-b8e62c988b52\") " Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.035248 4751 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "907bed97-620e-441c-9539-b8e62c988b52" (UID: "907bed97-620e-441c-9539-b8e62c988b52"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.046618 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "907bed97-620e-441c-9539-b8e62c988b52" (UID: "907bed97-620e-441c-9539-b8e62c988b52"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.059991 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/907bed97-620e-441c-9539-b8e62c988b52-kube-api-access-6v9xd" (OuterVolumeSpecName: "kube-api-access-6v9xd") pod "907bed97-620e-441c-9539-b8e62c988b52" (UID: "907bed97-620e-441c-9539-b8e62c988b52"). InnerVolumeSpecName "kube-api-access-6v9xd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.069621 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-scripts" (OuterVolumeSpecName: "scripts") pod "907bed97-620e-441c-9539-b8e62c988b52" (UID: "907bed97-620e-441c-9539-b8e62c988b52"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.098274 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-bb8z8"] Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.098495 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" podUID="6c666273-be4c-420f-a8d0-858a389c124f" containerName="dnsmasq-dns" containerID="cri-o://92015060d4409dfac614cc5618cb83a61eb9abea5e2973f2f7b0a339ca2fd00d" gracePeriod=10 Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.099669 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.114325 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "907bed97-620e-441c-9539-b8e62c988b52" (UID: "907bed97-620e-441c-9539-b8e62c988b52"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.122649 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fc5dm\" (UniqueName: \"kubernetes.io/projected/f306ad12-0f04-4414-8393-8ab5cc63c8b5-kube-api-access-fc5dm\") pod \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\" (UID: \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\") " Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.122703 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f306ad12-0f04-4414-8393-8ab5cc63c8b5-config-data\") pod \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\" (UID: \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\") " Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.122740 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f306ad12-0f04-4414-8393-8ab5cc63c8b5-logs\") pod \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\" (UID: \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\") " Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.122863 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f306ad12-0f04-4414-8393-8ab5cc63c8b5-scripts\") pod \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\" (UID: \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\") " Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.122886 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f306ad12-0f04-4414-8393-8ab5cc63c8b5-combined-ca-bundle\") pod \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\" (UID: \"f306ad12-0f04-4414-8393-8ab5cc63c8b5\") " Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.123426 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6v9xd\" (UniqueName: \"kubernetes.io/projected/907bed97-620e-441c-9539-b8e62c988b52-kube-api-access-6v9xd\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.123450 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.123467 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.123479 4751 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-fernet-keys\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.123491 4751 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-credential-keys\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.125832 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f306ad12-0f04-4414-8393-8ab5cc63c8b5-logs" (OuterVolumeSpecName: "logs") pod "f306ad12-0f04-4414-8393-8ab5cc63c8b5" (UID: "f306ad12-0f04-4414-8393-8ab5cc63c8b5"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.136312 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-ssjx5"] Feb 27 16:47:18 crc kubenswrapper[4751]: E0227 16:47:18.136789 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="907bed97-620e-441c-9539-b8e62c988b52" containerName="keystone-bootstrap" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.136811 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="907bed97-620e-441c-9539-b8e62c988b52" containerName="keystone-bootstrap" Feb 27 16:47:18 crc kubenswrapper[4751]: E0227 16:47:18.136842 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f306ad12-0f04-4414-8393-8ab5cc63c8b5" containerName="placement-db-sync" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.136866 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="f306ad12-0f04-4414-8393-8ab5cc63c8b5" containerName="placement-db-sync" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.137010 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="907bed97-620e-441c-9539-b8e62c988b52" containerName="keystone-bootstrap" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.137027 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="f306ad12-0f04-4414-8393-8ab5cc63c8b5" containerName="placement-db-sync" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.138077 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.140222 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f306ad12-0f04-4414-8393-8ab5cc63c8b5-scripts" (OuterVolumeSpecName: "scripts") pod "f306ad12-0f04-4414-8393-8ab5cc63c8b5" (UID: "f306ad12-0f04-4414-8393-8ab5cc63c8b5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.142950 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f306ad12-0f04-4414-8393-8ab5cc63c8b5-kube-api-access-fc5dm" (OuterVolumeSpecName: "kube-api-access-fc5dm") pod "f306ad12-0f04-4414-8393-8ab5cc63c8b5" (UID: "f306ad12-0f04-4414-8393-8ab5cc63c8b5"). InnerVolumeSpecName "kube-api-access-fc5dm". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.169791 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-config-data" (OuterVolumeSpecName: "config-data") pod "907bed97-620e-441c-9539-b8e62c988b52" (UID: "907bed97-620e-441c-9539-b8e62c988b52"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.173090 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-ssjx5"] Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.180841 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f306ad12-0f04-4414-8393-8ab5cc63c8b5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f306ad12-0f04-4414-8393-8ab5cc63c8b5" (UID: "f306ad12-0f04-4414-8393-8ab5cc63c8b5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.184679 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f306ad12-0f04-4414-8393-8ab5cc63c8b5-config-data" (OuterVolumeSpecName: "config-data") pod "f306ad12-0f04-4414-8393-8ab5cc63c8b5" (UID: "f306ad12-0f04-4414-8393-8ab5cc63c8b5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.228498 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-config\") pod \"dnsmasq-dns-55f844cf75-ssjx5\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.228675 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-ssjx5\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.228702 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-dns-svc\") pod \"dnsmasq-dns-55f844cf75-ssjx5\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.228720 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-ssjx5\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.228742 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-ssjx5\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.228758 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2wlh\" (UniqueName: \"kubernetes.io/projected/ea037294-8525-4c48-a867-d66f97c08253-kube-api-access-r2wlh\") pod \"dnsmasq-dns-55f844cf75-ssjx5\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.228804 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/907bed97-620e-441c-9539-b8e62c988b52-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.228816 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fc5dm\" (UniqueName: \"kubernetes.io/projected/f306ad12-0f04-4414-8393-8ab5cc63c8b5-kube-api-access-fc5dm\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.228826 4751 
reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f306ad12-0f04-4414-8393-8ab5cc63c8b5-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.228833 4751 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f306ad12-0f04-4414-8393-8ab5cc63c8b5-logs\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.228841 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f306ad12-0f04-4414-8393-8ab5cc63c8b5-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.228849 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f306ad12-0f04-4414-8393-8ab5cc63c8b5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.249960 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-688f5555d8-5fnpx"] Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.251530 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-688f5555d8-5fnpx" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.265508 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-4c7l7" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.265842 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.265945 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.266269 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.287436 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-688f5555d8-5fnpx"] Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.335347 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-config\") pod \"dnsmasq-dns-55f844cf75-ssjx5\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.335724 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-ovndb-tls-certs\") pod \"neutron-688f5555d8-5fnpx\" (UID: \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\") " pod="openstack/neutron-688f5555d8-5fnpx" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.335744 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-config\") pod \"neutron-688f5555d8-5fnpx\" (UID: \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\") " pod="openstack/neutron-688f5555d8-5fnpx" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.335762 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-combined-ca-bundle\") pod \"neutron-688f5555d8-5fnpx\" (UID: \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\") " pod="openstack/neutron-688f5555d8-5fnpx" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.335786 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-ssjx5\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.335805 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-dns-svc\") pod \"dnsmasq-dns-55f844cf75-ssjx5\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.335825 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-ssjx5\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.335848 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-ssjx5\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.335863 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2wlh\" (UniqueName: \"kubernetes.io/projected/ea037294-8525-4c48-a867-d66f97c08253-kube-api-access-r2wlh\") pod \"dnsmasq-dns-55f844cf75-ssjx5\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.335879 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-httpd-config\") pod \"neutron-688f5555d8-5fnpx\" (UID: \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\") " pod="openstack/neutron-688f5555d8-5fnpx" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.335895 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vxbvv\" (UniqueName: \"kubernetes.io/projected/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-kube-api-access-vxbvv\") pod \"neutron-688f5555d8-5fnpx\" (UID: \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\") " pod="openstack/neutron-688f5555d8-5fnpx" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.337010 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-config\") pod \"dnsmasq-dns-55f844cf75-ssjx5\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.337035 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-ssjx5\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.337910 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-ssjx5\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.341936 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-dns-svc\") pod \"dnsmasq-dns-55f844cf75-ssjx5\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.343537 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-ssjx5\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.363440 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2wlh\" (UniqueName: \"kubernetes.io/projected/ea037294-8525-4c48-a867-d66f97c08253-kube-api-access-r2wlh\") pod \"dnsmasq-dns-55f844cf75-ssjx5\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.437149 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-ovndb-tls-certs\") pod \"neutron-688f5555d8-5fnpx\" (UID: \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\") " pod="openstack/neutron-688f5555d8-5fnpx" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.437194 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-config\") pod \"neutron-688f5555d8-5fnpx\" (UID: \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\") " pod="openstack/neutron-688f5555d8-5fnpx" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.437215 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-combined-ca-bundle\") pod \"neutron-688f5555d8-5fnpx\" (UID: \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\") " pod="openstack/neutron-688f5555d8-5fnpx" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.437255 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-httpd-config\") pod \"neutron-688f5555d8-5fnpx\" (UID: \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\") " pod="openstack/neutron-688f5555d8-5fnpx" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.437275 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vxbvv\" (UniqueName: \"kubernetes.io/projected/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-kube-api-access-vxbvv\") pod \"neutron-688f5555d8-5fnpx\" 
(UID: \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\") " pod="openstack/neutron-688f5555d8-5fnpx" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.444609 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-combined-ca-bundle\") pod \"neutron-688f5555d8-5fnpx\" (UID: \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\") " pod="openstack/neutron-688f5555d8-5fnpx" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.451539 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-ovndb-tls-certs\") pod \"neutron-688f5555d8-5fnpx\" (UID: \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\") " pod="openstack/neutron-688f5555d8-5fnpx" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.452346 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-httpd-config\") pod \"neutron-688f5555d8-5fnpx\" (UID: \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\") " pod="openstack/neutron-688f5555d8-5fnpx" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.452896 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-x847w" event={"ID":"907bed97-620e-441c-9539-b8e62c988b52","Type":"ContainerDied","Data":"ea5f5e85f50e3f13640886f2bb2beb2cf18caebe778ea894ea6d1d7935d2029c"} Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.452927 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ea5f5e85f50e3f13640886f2bb2beb2cf18caebe778ea894ea6d1d7935d2029c" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.452988 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-x847w" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.453388 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-config\") pod \"neutron-688f5555d8-5fnpx\" (UID: \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\") " pod="openstack/neutron-688f5555d8-5fnpx" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.465609 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vxbvv\" (UniqueName: \"kubernetes.io/projected/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-kube-api-access-vxbvv\") pod \"neutron-688f5555d8-5fnpx\" (UID: \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\") " pod="openstack/neutron-688f5555d8-5fnpx" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.467925 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.487087 4751 generic.go:334] "Generic (PLEG): container finished" podID="6c666273-be4c-420f-a8d0-858a389c124f" containerID="92015060d4409dfac614cc5618cb83a61eb9abea5e2973f2f7b0a339ca2fd00d" exitCode=0 Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.487136 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" event={"ID":"6c666273-be4c-420f-a8d0-858a389c124f","Type":"ContainerDied","Data":"92015060d4409dfac614cc5618cb83a61eb9abea5e2973f2f7b0a339ca2fd00d"} Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.500775 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-mvlkh" event={"ID":"f306ad12-0f04-4414-8393-8ab5cc63c8b5","Type":"ContainerDied","Data":"5b32878aa739d511caeac604c3e367efefc6152d52123084c795c93eb94ed5f5"} Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.500818 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5b32878aa739d511caeac604c3e367efefc6152d52123084c795c93eb94ed5f5" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.500900 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-mvlkh" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.554453 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cdb4412f-6028-4cd6-9a52-ca11a99a6474" path="/var/lib/kubelet/pods/cdb4412f-6028-4cd6-9a52-ca11a99a6474/volumes" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.555308 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e68bea53-8a9e-4229-bd71-1c6aeca5202c" path="/var/lib/kubelet/pods/e68bea53-8a9e-4229-bd71-1c6aeca5202c/volumes" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.556055 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-6db7c8cdbf-x9xf8"] Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.557052 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.564936 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.565111 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.565203 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-hvwps" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.565299 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.565427 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.565527 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.586162 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6db7c8cdbf-x9xf8"] Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.613450 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-688f5555d8-5fnpx" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.619682 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-c7b6db6d-k4vfr"] Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.621140 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.622799 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.624362 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.624581 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.624843 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-78b27" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.624955 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.635452 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-c7b6db6d-k4vfr"] Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.640315 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27twd\" (UniqueName: \"kubernetes.io/projected/0cf1e239-243c-4f96-abb6-c3fb850e98e1-kube-api-access-27twd\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.640342 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-fernet-keys\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.640479 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-internal-tls-certs\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.640499 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-credential-keys\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.640518 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-public-tls-certs\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.640573 4751 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-config-data\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.640595 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-scripts\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.640623 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-combined-ca-bundle\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.750303 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-internal-tls-certs\") pod \"placement-c7b6db6d-k4vfr\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.750927 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-internal-tls-certs\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.750955 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-credential-keys\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.751077 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-public-tls-certs\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.751252 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-public-tls-certs\") pod \"placement-c7b6db6d-k4vfr\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.765780 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-config-data\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.765929 4751 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2sbf\" (UniqueName: \"kubernetes.io/projected/9b497134-1c13-450d-830a-0e0e7d51fe9d-kube-api-access-s2sbf\") pod \"placement-c7b6db6d-k4vfr\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.765965 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-scripts\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.766117 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-combined-ca-bundle\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.766283 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27twd\" (UniqueName: \"kubernetes.io/projected/0cf1e239-243c-4f96-abb6-c3fb850e98e1-kube-api-access-27twd\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.766301 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-fernet-keys\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.766455 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-combined-ca-bundle\") pod \"placement-c7b6db6d-k4vfr\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.766588 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-config-data\") pod \"placement-c7b6db6d-k4vfr\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.766620 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b497134-1c13-450d-830a-0e0e7d51fe9d-logs\") pod \"placement-c7b6db6d-k4vfr\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.766790 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-scripts\") pod \"placement-c7b6db6d-k4vfr\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.769495 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/glance-default-external-api-0"] Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.796111 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-scripts\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.800321 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-public-tls-certs\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.802169 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-config-data\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: E0227 16:47:18.803546 4751 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod907bed97_620e_441c_9539_b8e62c988b52.slice\": RecentStats: unable to find data in memory cache]" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.805687 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-fernet-keys\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.819096 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-combined-ca-bundle\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.834653 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-internal-tls-certs\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.855062 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-credential-keys\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.868283 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-combined-ca-bundle\") pod \"placement-c7b6db6d-k4vfr\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.868332 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-config-data\") pod \"placement-c7b6db6d-k4vfr\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.868355 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b497134-1c13-450d-830a-0e0e7d51fe9d-logs\") pod \"placement-c7b6db6d-k4vfr\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.868395 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-scripts\") pod \"placement-c7b6db6d-k4vfr\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.868441 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-internal-tls-certs\") pod \"placement-c7b6db6d-k4vfr\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.868490 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-public-tls-certs\") pod \"placement-c7b6db6d-k4vfr\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.868525 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2sbf\" (UniqueName: \"kubernetes.io/projected/9b497134-1c13-450d-830a-0e0e7d51fe9d-kube-api-access-s2sbf\") pod \"placement-c7b6db6d-k4vfr\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.875432 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b497134-1c13-450d-830a-0e0e7d51fe9d-logs\") pod \"placement-c7b6db6d-k4vfr\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.880314 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27twd\" (UniqueName: \"kubernetes.io/projected/0cf1e239-243c-4f96-abb6-c3fb850e98e1-kube-api-access-27twd\") pod \"keystone-6db7c8cdbf-x9xf8\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.885183 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-internal-tls-certs\") pod \"placement-c7b6db6d-k4vfr\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.888057 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-public-tls-certs\") pod \"placement-c7b6db6d-k4vfr\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " 
pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.911924 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-combined-ca-bundle\") pod \"placement-c7b6db6d-k4vfr\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.926743 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-config-data\") pod \"placement-c7b6db6d-k4vfr\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.927579 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-scripts\") pod \"placement-c7b6db6d-k4vfr\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.940999 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2sbf\" (UniqueName: \"kubernetes.io/projected/9b497134-1c13-450d-830a-0e0e7d51fe9d-kube-api-access-s2sbf\") pod \"placement-c7b6db6d-k4vfr\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:18 crc kubenswrapper[4751]: I0227 16:47:18.976911 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.064181 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.070309 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-5c5d5b6fdd-9d8xv"] Feb 27 16:47:19 crc kubenswrapper[4751]: E0227 16:47:19.070665 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c666273-be4c-420f-a8d0-858a389c124f" containerName="init" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.070679 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c666273-be4c-420f-a8d0-858a389c124f" containerName="init" Feb 27 16:47:19 crc kubenswrapper[4751]: E0227 16:47:19.070708 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c666273-be4c-420f-a8d0-858a389c124f" containerName="dnsmasq-dns" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.070714 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c666273-be4c-420f-a8d0-858a389c124f" containerName="dnsmasq-dns" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.070878 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c666273-be4c-420f-a8d0-858a389c124f" containerName="dnsmasq-dns" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.071763 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.109362 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.118474 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5c5d5b6fdd-9d8xv"] Feb 27 16:47:19 crc kubenswrapper[4751]: W0227 16:47:19.120682 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf864f57e_a41a_4e30_9293_8ede35ea08dd.slice/crio-67ea88b9b2b75da50386f5bdef1832861a8980c0c0642bb0f9fba4c4c32e7303 WatchSource:0}: Error finding container 67ea88b9b2b75da50386f5bdef1832861a8980c0c0642bb0f9fba4c4c32e7303: Status 404 returned error can't find the container with id 67ea88b9b2b75da50386f5bdef1832861a8980c0c0642bb0f9fba4c4c32e7303 Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.183948 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-dns-swift-storage-0\") pod \"6c666273-be4c-420f-a8d0-858a389c124f\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.184317 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-config\") pod \"6c666273-be4c-420f-a8d0-858a389c124f\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.184440 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-ovsdbserver-nb\") pod \"6c666273-be4c-420f-a8d0-858a389c124f\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.184552 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-dns-svc\") pod \"6c666273-be4c-420f-a8d0-858a389c124f\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.184572 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4nhv\" (UniqueName: \"kubernetes.io/projected/6c666273-be4c-420f-a8d0-858a389c124f-kube-api-access-d4nhv\") pod \"6c666273-be4c-420f-a8d0-858a389c124f\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.186781 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-ovsdbserver-sb\") pod \"6c666273-be4c-420f-a8d0-858a389c124f\" (UID: \"6c666273-be4c-420f-a8d0-858a389c124f\") " Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.187216 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-internal-tls-certs\") pod \"placement-5c5d5b6fdd-9d8xv\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.187343 4751 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-combined-ca-bundle\") pod \"placement-5c5d5b6fdd-9d8xv\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.187371 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-scripts\") pod \"placement-5c5d5b6fdd-9d8xv\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.187478 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-logs\") pod \"placement-5c5d5b6fdd-9d8xv\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.187591 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-public-tls-certs\") pod \"placement-5c5d5b6fdd-9d8xv\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.187651 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s757q\" (UniqueName: \"kubernetes.io/projected/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-kube-api-access-s757q\") pod \"placement-5c5d5b6fdd-9d8xv\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.187713 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-config-data\") pod \"placement-5c5d5b6fdd-9d8xv\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.188087 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.206855 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c666273-be4c-420f-a8d0-858a389c124f-kube-api-access-d4nhv" (OuterVolumeSpecName: "kube-api-access-d4nhv") pod "6c666273-be4c-420f-a8d0-858a389c124f" (UID: "6c666273-be4c-420f-a8d0-858a389c124f"). InnerVolumeSpecName "kube-api-access-d4nhv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.218143 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-78bc7f9d5b-gzgqp"] Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.223853 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-78bc7f9d5b-gzgqp" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.242377 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-78bc7f9d5b-gzgqp"] Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.289707 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-httpd-config\") pod \"neutron-78bc7f9d5b-gzgqp\" (UID: \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\") " pod="openstack/neutron-78bc7f9d5b-gzgqp" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.289750 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-public-tls-certs\") pod \"placement-5c5d5b6fdd-9d8xv\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.289776 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49fvw\" (UniqueName: \"kubernetes.io/projected/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-kube-api-access-49fvw\") pod \"neutron-78bc7f9d5b-gzgqp\" (UID: \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\") " pod="openstack/neutron-78bc7f9d5b-gzgqp" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.289798 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s757q\" (UniqueName: \"kubernetes.io/projected/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-kube-api-access-s757q\") pod \"placement-5c5d5b6fdd-9d8xv\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.289833 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-config-data\") pod \"placement-5c5d5b6fdd-9d8xv\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.289865 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-ovndb-tls-certs\") pod \"neutron-78bc7f9d5b-gzgqp\" (UID: \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\") " pod="openstack/neutron-78bc7f9d5b-gzgqp" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.289887 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-internal-tls-certs\") pod \"placement-5c5d5b6fdd-9d8xv\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.289928 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-combined-ca-bundle\") pod \"placement-5c5d5b6fdd-9d8xv\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.289945 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-scripts\") pod \"placement-5c5d5b6fdd-9d8xv\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.289985 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-config\") pod \"neutron-78bc7f9d5b-gzgqp\" (UID: \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\") " pod="openstack/neutron-78bc7f9d5b-gzgqp" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.290004 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-combined-ca-bundle\") pod \"neutron-78bc7f9d5b-gzgqp\" (UID: \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\") " pod="openstack/neutron-78bc7f9d5b-gzgqp" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.290028 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-logs\") pod \"placement-5c5d5b6fdd-9d8xv\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.290071 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4nhv\" (UniqueName: \"kubernetes.io/projected/6c666273-be4c-420f-a8d0-858a389c124f-kube-api-access-d4nhv\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.290359 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-logs\") pod \"placement-5c5d5b6fdd-9d8xv\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.297693 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6c666273-be4c-420f-a8d0-858a389c124f" (UID: "6c666273-be4c-420f-a8d0-858a389c124f"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.299733 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-scripts\") pod \"placement-5c5d5b6fdd-9d8xv\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.301552 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-public-tls-certs\") pod \"placement-5c5d5b6fdd-9d8xv\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.302379 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-config-data\") pod \"placement-5c5d5b6fdd-9d8xv\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.302933 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-combined-ca-bundle\") pod \"placement-5c5d5b6fdd-9d8xv\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.304050 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-internal-tls-certs\") pod \"placement-5c5d5b6fdd-9d8xv\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.304236 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-ssjx5"] Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.311454 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6c666273-be4c-420f-a8d0-858a389c124f" (UID: "6c666273-be4c-420f-a8d0-858a389c124f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.315783 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-config" (OuterVolumeSpecName: "config") pod "6c666273-be4c-420f-a8d0-858a389c124f" (UID: "6c666273-be4c-420f-a8d0-858a389c124f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.320268 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6c666273-be4c-420f-a8d0-858a389c124f" (UID: "6c666273-be4c-420f-a8d0-858a389c124f"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.323955 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s757q\" (UniqueName: \"kubernetes.io/projected/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-kube-api-access-s757q\") pod \"placement-5c5d5b6fdd-9d8xv\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.367443 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "6c666273-be4c-420f-a8d0-858a389c124f" (UID: "6c666273-be4c-420f-a8d0-858a389c124f"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.395572 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-httpd-config\") pod \"neutron-78bc7f9d5b-gzgqp\" (UID: \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\") " pod="openstack/neutron-78bc7f9d5b-gzgqp" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.395630 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49fvw\" (UniqueName: \"kubernetes.io/projected/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-kube-api-access-49fvw\") pod \"neutron-78bc7f9d5b-gzgqp\" (UID: \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\") " pod="openstack/neutron-78bc7f9d5b-gzgqp" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.395697 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-ovndb-tls-certs\") pod \"neutron-78bc7f9d5b-gzgqp\" (UID: \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\") " pod="openstack/neutron-78bc7f9d5b-gzgqp" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.395761 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-config\") pod \"neutron-78bc7f9d5b-gzgqp\" (UID: \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\") " pod="openstack/neutron-78bc7f9d5b-gzgqp" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.395784 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-combined-ca-bundle\") pod \"neutron-78bc7f9d5b-gzgqp\" (UID: \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\") " pod="openstack/neutron-78bc7f9d5b-gzgqp" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.395880 4751 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.395891 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.395902 4751 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.395915 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.395925 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c666273-be4c-420f-a8d0-858a389c124f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.399737 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-combined-ca-bundle\") pod \"neutron-78bc7f9d5b-gzgqp\" (UID: \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\") " pod="openstack/neutron-78bc7f9d5b-gzgqp" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.402507 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-httpd-config\") pod \"neutron-78bc7f9d5b-gzgqp\" (UID: \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\") " pod="openstack/neutron-78bc7f9d5b-gzgqp" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.405677 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-config\") pod \"neutron-78bc7f9d5b-gzgqp\" (UID: \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\") " pod="openstack/neutron-78bc7f9d5b-gzgqp" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.408442 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-ovndb-tls-certs\") pod \"neutron-78bc7f9d5b-gzgqp\" (UID: \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\") " pod="openstack/neutron-78bc7f9d5b-gzgqp" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.421366 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.428618 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49fvw\" (UniqueName: \"kubernetes.io/projected/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-kube-api-access-49fvw\") pod \"neutron-78bc7f9d5b-gzgqp\" (UID: \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\") " pod="openstack/neutron-78bc7f9d5b-gzgqp" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.568558 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f864f57e-a41a-4e30-9293-8ede35ea08dd","Type":"ContainerStarted","Data":"67ea88b9b2b75da50386f5bdef1832861a8980c0c0642bb0f9fba4c4c32e7303"} Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.571920 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-78bc7f9d5b-gzgqp" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.580763 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-688f5555d8-5fnpx"] Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.590166 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"efb8a6cc-5def-4be4-82e1-b20f19d1c800","Type":"ContainerStarted","Data":"b08a234d7afcb1186bb65406412c781713c7c3f63a3ccdc53d4d086783801785"} Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.594067 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" event={"ID":"6c666273-be4c-420f-a8d0-858a389c124f","Type":"ContainerDied","Data":"cfc104fad94858dea6049b1c29b14b3c1e5f586e307fe9be9084edc53fe4618c"} Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.594128 4751 scope.go:117] "RemoveContainer" containerID="92015060d4409dfac614cc5618cb83a61eb9abea5e2973f2f7b0a339ca2fd00d" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.595043 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-bb8z8" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.613646 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" event={"ID":"ea037294-8525-4c48-a867-d66f97c08253","Type":"ContainerStarted","Data":"10f4bcbaeaaaed13eba99b0198a49a3028c37d8640275666acfac2c3ac0c5a94"} Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.716025 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6db7c8cdbf-x9xf8"] Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.900815 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-bb8z8"] Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.948595 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-bb8z8"] Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.953200 4751 scope.go:117] "RemoveContainer" containerID="4a28bd1c9e91e873f5ab7b4cc2bf511329afb34afd48fbaf670010994dd19b7f" Feb 27 16:47:19 crc kubenswrapper[4751]: I0227 16:47:19.970836 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-c7b6db6d-k4vfr"] Feb 27 16:47:20 crc kubenswrapper[4751]: I0227 16:47:20.205437 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5c5d5b6fdd-9d8xv"] Feb 27 16:47:20 crc kubenswrapper[4751]: I0227 16:47:20.472540 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-78bc7f9d5b-gzgqp"] Feb 27 16:47:20 crc kubenswrapper[4751]: I0227 16:47:20.530479 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c666273-be4c-420f-a8d0-858a389c124f" path="/var/lib/kubelet/pods/6c666273-be4c-420f-a8d0-858a389c124f/volumes" Feb 27 16:47:20 crc kubenswrapper[4751]: I0227 16:47:20.622640 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5c5d5b6fdd-9d8xv" event={"ID":"3c5e58eb-31a4-4253-8cb9-a9486bb2d955","Type":"ContainerStarted","Data":"a7dc5bbdf1330339eaa6d5cb30861bb716c4c5dcc8398476977a1a140453e867"} Feb 27 16:47:20 crc kubenswrapper[4751]: I0227 16:47:20.623539 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-c7b6db6d-k4vfr" 
event={"ID":"9b497134-1c13-450d-830a-0e0e7d51fe9d","Type":"ContainerStarted","Data":"1dce7a56d726d7c785893b9517187158373995084dd91716bbfb1258da763af7"} Feb 27 16:47:20 crc kubenswrapper[4751]: I0227 16:47:20.624562 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6db7c8cdbf-x9xf8" event={"ID":"0cf1e239-243c-4f96-abb6-c3fb850e98e1","Type":"ContainerStarted","Data":"8e3d2c303580fdf702bd6c9ffc39ae3efede00b5bcb96918b39337fb39f02bce"} Feb 27 16:47:20 crc kubenswrapper[4751]: I0227 16:47:20.625628 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-688f5555d8-5fnpx" event={"ID":"6340b4f1-4797-40ed-aaaa-a37e9c0cd649","Type":"ContainerStarted","Data":"5476f9e5d47ee9749395d754923eebab918851f6cec44079faca0b1021e3062d"} Feb 27 16:47:20 crc kubenswrapper[4751]: I0227 16:47:20.639816 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-78bc7f9d5b-gzgqp" event={"ID":"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe","Type":"ContainerStarted","Data":"9851b66786bceae07be853a8e8f0a1b51ea2084c8757422aaa819e630d62e1d4"} Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.306324 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-688f5555d8-5fnpx"] Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.363247 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-649c97d5df-x4tkf"] Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.364582 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.367368 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.368346 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.391158 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-649c97d5df-x4tkf"] Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.468127 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-ovndb-tls-certs\") pod \"neutron-649c97d5df-x4tkf\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.468179 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-internal-tls-certs\") pod \"neutron-649c97d5df-x4tkf\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.468531 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hs79p\" (UniqueName: \"kubernetes.io/projected/16754588-ca23-484b-b8e8-21bc94c640f3-kube-api-access-hs79p\") pod \"neutron-649c97d5df-x4tkf\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.468602 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: 
\"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-httpd-config\") pod \"neutron-649c97d5df-x4tkf\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.468804 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-combined-ca-bundle\") pod \"neutron-649c97d5df-x4tkf\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.468909 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-public-tls-certs\") pod \"neutron-649c97d5df-x4tkf\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.468995 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-config\") pod \"neutron-649c97d5df-x4tkf\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.571716 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hs79p\" (UniqueName: \"kubernetes.io/projected/16754588-ca23-484b-b8e8-21bc94c640f3-kube-api-access-hs79p\") pod \"neutron-649c97d5df-x4tkf\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.571763 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-httpd-config\") pod \"neutron-649c97d5df-x4tkf\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.571811 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-combined-ca-bundle\") pod \"neutron-649c97d5df-x4tkf\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.571845 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-public-tls-certs\") pod \"neutron-649c97d5df-x4tkf\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.571874 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-config\") pod \"neutron-649c97d5df-x4tkf\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.571911 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-ovndb-tls-certs\") pod 
\"neutron-649c97d5df-x4tkf\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.571930 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-internal-tls-certs\") pod \"neutron-649c97d5df-x4tkf\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.578450 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-httpd-config\") pod \"neutron-649c97d5df-x4tkf\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.578598 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-internal-tls-certs\") pod \"neutron-649c97d5df-x4tkf\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.578706 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-ovndb-tls-certs\") pod \"neutron-649c97d5df-x4tkf\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.579053 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-config\") pod \"neutron-649c97d5df-x4tkf\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.581234 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-public-tls-certs\") pod \"neutron-649c97d5df-x4tkf\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.581692 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-combined-ca-bundle\") pod \"neutron-649c97d5df-x4tkf\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.593373 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hs79p\" (UniqueName: \"kubernetes.io/projected/16754588-ca23-484b-b8e8-21bc94c640f3-kube-api-access-hs79p\") pod \"neutron-649c97d5df-x4tkf\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:21 crc kubenswrapper[4751]: I0227 16:47:21.681529 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:22 crc kubenswrapper[4751]: I0227 16:47:22.243138 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-649c97d5df-x4tkf"] Feb 27 16:47:22 crc kubenswrapper[4751]: W0227 16:47:22.246903 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod16754588_ca23_484b_b8e8_21bc94c640f3.slice/crio-5bc57d33e7d1c40aacf0c0bd89f7bf6fb922749fa26c3e8499dff4cafda1351b WatchSource:0}: Error finding container 5bc57d33e7d1c40aacf0c0bd89f7bf6fb922749fa26c3e8499dff4cafda1351b: Status 404 returned error can't find the container with id 5bc57d33e7d1c40aacf0c0bd89f7bf6fb922749fa26c3e8499dff4cafda1351b Feb 27 16:47:22 crc kubenswrapper[4751]: I0227 16:47:22.659287 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-649c97d5df-x4tkf" event={"ID":"16754588-ca23-484b-b8e8-21bc94c640f3","Type":"ContainerStarted","Data":"5bc57d33e7d1c40aacf0c0bd89f7bf6fb922749fa26c3e8499dff4cafda1351b"} Feb 27 16:47:23 crc kubenswrapper[4751]: I0227 16:47:23.684716 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5c5d5b6fdd-9d8xv" event={"ID":"3c5e58eb-31a4-4253-8cb9-a9486bb2d955","Type":"ContainerStarted","Data":"fe0b48cb4c4111dfc56e9cc80355b87b652df2aa7701be61b0f630ca7e55427a"} Feb 27 16:47:23 crc kubenswrapper[4751]: I0227 16:47:23.705887 4751 generic.go:334] "Generic (PLEG): container finished" podID="ea037294-8525-4c48-a867-d66f97c08253" containerID="656de3f15bfaf3e3acb2eb6f47617a535be06f8850c915799511fbd0adcb13d9" exitCode=0 Feb 27 16:47:23 crc kubenswrapper[4751]: I0227 16:47:23.706004 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" event={"ID":"ea037294-8525-4c48-a867-d66f97c08253","Type":"ContainerDied","Data":"656de3f15bfaf3e3acb2eb6f47617a535be06f8850c915799511fbd0adcb13d9"} Feb 27 16:47:23 crc kubenswrapper[4751]: I0227 16:47:23.728626 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6db7c8cdbf-x9xf8" event={"ID":"0cf1e239-243c-4f96-abb6-c3fb850e98e1","Type":"ContainerStarted","Data":"432cd197f9cae788ffeb84426732d6a096e82727553ed5b6ea7143ce3bd3614c"} Feb 27 16:47:23 crc kubenswrapper[4751]: I0227 16:47:23.729467 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:23 crc kubenswrapper[4751]: I0227 16:47:23.779723 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f864f57e-a41a-4e30-9293-8ede35ea08dd","Type":"ContainerStarted","Data":"fe0e9a4bd83db75eb5b6dc09f04fff2d883c2022dd54ab3c5ad2e44e37c4dab2"} Feb 27 16:47:23 crc kubenswrapper[4751]: I0227 16:47:23.786795 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-c7b6db6d-k4vfr" event={"ID":"9b497134-1c13-450d-830a-0e0e7d51fe9d","Type":"ContainerStarted","Data":"ac69e5335b42e47a46aedf872ab39c51b4ad64fca410faa17d57c61e9ea2ae1e"} Feb 27 16:47:23 crc kubenswrapper[4751]: I0227 16:47:23.793721 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-649c97d5df-x4tkf" event={"ID":"16754588-ca23-484b-b8e8-21bc94c640f3","Type":"ContainerStarted","Data":"a608ca0fcc607ebcc4925dc217e870cd065fd09df04f7caf9fd6c4671876c01c"} Feb 27 16:47:23 crc kubenswrapper[4751]: I0227 16:47:23.804746 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-688f5555d8-5fnpx" 
event={"ID":"6340b4f1-4797-40ed-aaaa-a37e9c0cd649","Type":"ContainerStarted","Data":"5ba7e76371df6100c4852bd8675183d313d8bddc2add76d62703958cd240ec1f"} Feb 27 16:47:23 crc kubenswrapper[4751]: I0227 16:47:23.817136 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-78bc7f9d5b-gzgqp" event={"ID":"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe","Type":"ContainerStarted","Data":"76b78dde5323d728cf0176c3ec46089efcdee6853d62d69a7eea78cb609893bd"} Feb 27 16:47:23 crc kubenswrapper[4751]: I0227 16:47:23.830593 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"efb8a6cc-5def-4be4-82e1-b20f19d1c800","Type":"ContainerStarted","Data":"9deea4b5ebc9b2d7bf3b3d4ed606e017f437870861c43f0542112993c9aa7015"} Feb 27 16:47:25 crc kubenswrapper[4751]: I0227 16:47:25.540480 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-6db7c8cdbf-x9xf8" podStartSLOduration=7.540463808 podStartE2EDuration="7.540463808s" podCreationTimestamp="2026-02-27 16:47:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:47:23.763836839 +0000 UTC m=+1405.910851286" watchObservedRunningTime="2026-02-27 16:47:25.540463808 +0000 UTC m=+1407.687478255" Feb 27 16:47:25 crc kubenswrapper[4751]: I0227 16:47:25.868841 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-78bc7f9d5b-gzgqp" event={"ID":"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe","Type":"ContainerStarted","Data":"627b5af683b614bccc6f8a8ba3d9f8d587a67e596d3eda586ddd8cd0c89ca727"} Feb 27 16:47:25 crc kubenswrapper[4751]: I0227 16:47:25.884632 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"efb8a6cc-5def-4be4-82e1-b20f19d1c800","Type":"ContainerStarted","Data":"714aa704291a6040e48f2ee626814b51c2a58b2bf0d958cef605f1059bbfaf53"} Feb 27 16:47:25 crc kubenswrapper[4751]: I0227 16:47:25.890931 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5c5d5b6fdd-9d8xv" event={"ID":"3c5e58eb-31a4-4253-8cb9-a9486bb2d955","Type":"ContainerStarted","Data":"d68a8fb32c3c122cd258ca89b0d0d8f27592db1bade310c58767879538bba0eb"} Feb 27 16:47:25 crc kubenswrapper[4751]: I0227 16:47:25.891620 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:25 crc kubenswrapper[4751]: I0227 16:47:25.891642 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:25 crc kubenswrapper[4751]: I0227 16:47:25.900196 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-c7b6db6d-k4vfr" event={"ID":"9b497134-1c13-450d-830a-0e0e7d51fe9d","Type":"ContainerStarted","Data":"d19a09bb3b1f70cc20a18d01058d0ff753c152ba1f6571f06895c1cb72184b1d"} Feb 27 16:47:25 crc kubenswrapper[4751]: I0227 16:47:25.914185 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" event={"ID":"ea037294-8525-4c48-a867-d66f97c08253","Type":"ContainerStarted","Data":"83d6a2faf6b2015d89f9922dc8ce2721c834106a2150043178c9ca758685d04d"} Feb 27 16:47:25 crc kubenswrapper[4751]: I0227 16:47:25.920014 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=8.919995856 podStartE2EDuration="8.919995856s" podCreationTimestamp="2026-02-27 16:47:17 
+0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:47:25.900826354 +0000 UTC m=+1408.047840801" watchObservedRunningTime="2026-02-27 16:47:25.919995856 +0000 UTC m=+1408.067010293" Feb 27 16:47:25 crc kubenswrapper[4751]: I0227 16:47:25.921975 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-649c97d5df-x4tkf" event={"ID":"16754588-ca23-484b-b8e8-21bc94c640f3","Type":"ContainerStarted","Data":"8cae1a6a519f4d8cb3bd285b3e459c6d60a2234a0f23af55f5a0cf07199403d7"} Feb 27 16:47:25 crc kubenswrapper[4751]: I0227 16:47:25.933608 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-5c5d5b6fdd-9d8xv" podStartSLOduration=6.933589309 podStartE2EDuration="6.933589309s" podCreationTimestamp="2026-02-27 16:47:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:47:25.924478176 +0000 UTC m=+1408.071492643" watchObservedRunningTime="2026-02-27 16:47:25.933589309 +0000 UTC m=+1408.080603756" Feb 27 16:47:25 crc kubenswrapper[4751]: I0227 16:47:25.935909 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f864f57e-a41a-4e30-9293-8ede35ea08dd","Type":"ContainerStarted","Data":"e143dc066b449f2f33b103233149d43b2ba11f124d24c1804c8d33d492af04af"} Feb 27 16:47:25 crc kubenswrapper[4751]: I0227 16:47:25.948292 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-688f5555d8-5fnpx" podUID="6340b4f1-4797-40ed-aaaa-a37e9c0cd649" containerName="neutron-api" containerID="cri-o://5ba7e76371df6100c4852bd8675183d313d8bddc2add76d62703958cd240ec1f" gracePeriod=30 Feb 27 16:47:25 crc kubenswrapper[4751]: I0227 16:47:25.948616 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-688f5555d8-5fnpx" event={"ID":"6340b4f1-4797-40ed-aaaa-a37e9c0cd649","Type":"ContainerStarted","Data":"bbfa9255e0aa1d090382b57faaa3d6afdae2ae8c3686c5f384c263f680ed66cc"} Feb 27 16:47:25 crc kubenswrapper[4751]: I0227 16:47:25.948671 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-688f5555d8-5fnpx" Feb 27 16:47:25 crc kubenswrapper[4751]: I0227 16:47:25.948937 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-688f5555d8-5fnpx" podUID="6340b4f1-4797-40ed-aaaa-a37e9c0cd649" containerName="neutron-httpd" containerID="cri-o://bbfa9255e0aa1d090382b57faaa3d6afdae2ae8c3686c5f384c263f680ed66cc" gracePeriod=30 Feb 27 16:47:25 crc kubenswrapper[4751]: I0227 16:47:25.976029 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-688f5555d8-5fnpx" podStartSLOduration=7.976012403 podStartE2EDuration="7.976012403s" podCreationTimestamp="2026-02-27 16:47:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:47:25.97066077 +0000 UTC m=+1408.117675217" watchObservedRunningTime="2026-02-27 16:47:25.976012403 +0000 UTC m=+1408.123026850" Feb 27 16:47:26 crc kubenswrapper[4751]: I0227 16:47:26.964750 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-vgcdl" event={"ID":"80c6b259-7f53-44bc-9230-adeacd7d9cf6","Type":"ContainerStarted","Data":"0f1b9d467c3c6be9ba6bc745147b076f8155ad05ceee660126b0cad365625a05"} Feb 27 
16:47:26 crc kubenswrapper[4751]: I0227 16:47:26.966517 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:26 crc kubenswrapper[4751]: I0227 16:47:26.966566 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:26 crc kubenswrapper[4751]: I0227 16:47:26.966581 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:26 crc kubenswrapper[4751]: I0227 16:47:26.981491 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" podStartSLOduration=8.981476432000001 podStartE2EDuration="8.981476432s" podCreationTimestamp="2026-02-27 16:47:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:47:26.979009996 +0000 UTC m=+1409.126024443" watchObservedRunningTime="2026-02-27 16:47:26.981476432 +0000 UTC m=+1409.128490879" Feb 27 16:47:27 crc kubenswrapper[4751]: I0227 16:47:27.010275 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-649c97d5df-x4tkf" podStartSLOduration=6.010255331 podStartE2EDuration="6.010255331s" podCreationTimestamp="2026-02-27 16:47:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:47:26.999730409 +0000 UTC m=+1409.146744856" watchObservedRunningTime="2026-02-27 16:47:27.010255331 +0000 UTC m=+1409.157269788" Feb 27 16:47:27 crc kubenswrapper[4751]: I0227 16:47:27.031898 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=10.031875588 podStartE2EDuration="10.031875588s" podCreationTimestamp="2026-02-27 16:47:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:47:27.021375998 +0000 UTC m=+1409.168390455" watchObservedRunningTime="2026-02-27 16:47:27.031875588 +0000 UTC m=+1409.178890035" Feb 27 16:47:27 crc kubenswrapper[4751]: I0227 16:47:27.046664 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-vgcdl" podStartSLOduration=2.820636111 podStartE2EDuration="39.046644483s" podCreationTimestamp="2026-02-27 16:46:48 +0000 UTC" firstStartedPulling="2026-02-27 16:46:49.917060933 +0000 UTC m=+1372.064075380" lastFinishedPulling="2026-02-27 16:47:26.143069305 +0000 UTC m=+1408.290083752" observedRunningTime="2026-02-27 16:47:27.034519839 +0000 UTC m=+1409.181534296" watchObservedRunningTime="2026-02-27 16:47:27.046644483 +0000 UTC m=+1409.193658930" Feb 27 16:47:27 crc kubenswrapper[4751]: I0227 16:47:27.064531 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-c7b6db6d-k4vfr" podStartSLOduration=9.0645091 podStartE2EDuration="9.0645091s" podCreationTimestamp="2026-02-27 16:47:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:47:27.056185858 +0000 UTC m=+1409.203200305" watchObservedRunningTime="2026-02-27 16:47:27.0645091 +0000 UTC m=+1409.211523557" Feb 27 16:47:27 crc kubenswrapper[4751]: I0227 16:47:27.078708 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/neutron-78bc7f9d5b-gzgqp" podStartSLOduration=8.078690069 podStartE2EDuration="8.078690069s" podCreationTimestamp="2026-02-27 16:47:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:47:27.072096543 +0000 UTC m=+1409.219110990" watchObservedRunningTime="2026-02-27 16:47:27.078690069 +0000 UTC m=+1409.225704526" Feb 27 16:47:27 crc kubenswrapper[4751]: I0227 16:47:27.853533 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Feb 27 16:47:27 crc kubenswrapper[4751]: I0227 16:47:27.853873 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Feb 27 16:47:27 crc kubenswrapper[4751]: I0227 16:47:27.894331 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Feb 27 16:47:27 crc kubenswrapper[4751]: I0227 16:47:27.916824 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Feb 27 16:47:27 crc kubenswrapper[4751]: I0227 16:47:27.916875 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Feb 27 16:47:27 crc kubenswrapper[4751]: I0227 16:47:27.918766 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Feb 27 16:47:27 crc kubenswrapper[4751]: I0227 16:47:27.968992 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Feb 27 16:47:27 crc kubenswrapper[4751]: I0227 16:47:27.975255 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Feb 27 16:47:27 crc kubenswrapper[4751]: I0227 16:47:27.977671 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-6s84l" event={"ID":"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df","Type":"ContainerStarted","Data":"ccf6a1800c30538fc57cee8fb0f6c11241c62e6a5663f569cec7197d8da7be0d"} Feb 27 16:47:27 crc kubenswrapper[4751]: I0227 16:47:27.977787 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Feb 27 16:47:27 crc kubenswrapper[4751]: I0227 16:47:27.977829 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Feb 27 16:47:27 crc kubenswrapper[4751]: I0227 16:47:27.979098 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Feb 27 16:47:27 crc kubenswrapper[4751]: I0227 16:47:27.979124 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Feb 27 16:47:28 crc kubenswrapper[4751]: I0227 16:47:28.036614 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-6s84l" podStartSLOduration=3.426463052 podStartE2EDuration="40.036598216s" podCreationTimestamp="2026-02-27 16:46:48 +0000 UTC" firstStartedPulling="2026-02-27 16:46:49.749565878 +0000 UTC m=+1371.896580325" lastFinishedPulling="2026-02-27 16:47:26.359701042 +0000 UTC m=+1408.506715489" observedRunningTime="2026-02-27 16:47:28.034131961 +0000 UTC m=+1410.181146408" watchObservedRunningTime="2026-02-27 16:47:28.036598216 +0000 UTC m=+1410.183612653" Feb 27 
16:47:31 crc kubenswrapper[4751]: I0227 16:47:31.047289 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Feb 27 16:47:31 crc kubenswrapper[4751]: I0227 16:47:31.487263 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Feb 27 16:47:31 crc kubenswrapper[4751]: I0227 16:47:31.495533 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Feb 27 16:47:32 crc kubenswrapper[4751]: I0227 16:47:32.024953 4751 generic.go:334] "Generic (PLEG): container finished" podID="80c6b259-7f53-44bc-9230-adeacd7d9cf6" containerID="0f1b9d467c3c6be9ba6bc745147b076f8155ad05ceee660126b0cad365625a05" exitCode=0 Feb 27 16:47:32 crc kubenswrapper[4751]: I0227 16:47:32.025016 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-vgcdl" event={"ID":"80c6b259-7f53-44bc-9230-adeacd7d9cf6","Type":"ContainerDied","Data":"0f1b9d467c3c6be9ba6bc745147b076f8155ad05ceee660126b0cad365625a05"} Feb 27 16:47:33 crc kubenswrapper[4751]: I0227 16:47:33.469547 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:33 crc kubenswrapper[4751]: I0227 16:47:33.473109 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-vgcdl" Feb 27 16:47:33 crc kubenswrapper[4751]: I0227 16:47:33.477485 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Feb 27 16:47:33 crc kubenswrapper[4751]: I0227 16:47:33.558241 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fkn2q\" (UniqueName: \"kubernetes.io/projected/80c6b259-7f53-44bc-9230-adeacd7d9cf6-kube-api-access-fkn2q\") pod \"80c6b259-7f53-44bc-9230-adeacd7d9cf6\" (UID: \"80c6b259-7f53-44bc-9230-adeacd7d9cf6\") " Feb 27 16:47:33 crc kubenswrapper[4751]: I0227 16:47:33.558341 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/80c6b259-7f53-44bc-9230-adeacd7d9cf6-db-sync-config-data\") pod \"80c6b259-7f53-44bc-9230-adeacd7d9cf6\" (UID: \"80c6b259-7f53-44bc-9230-adeacd7d9cf6\") " Feb 27 16:47:33 crc kubenswrapper[4751]: I0227 16:47:33.558474 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80c6b259-7f53-44bc-9230-adeacd7d9cf6-combined-ca-bundle\") pod \"80c6b259-7f53-44bc-9230-adeacd7d9cf6\" (UID: \"80c6b259-7f53-44bc-9230-adeacd7d9cf6\") " Feb 27 16:47:33 crc kubenswrapper[4751]: I0227 16:47:33.562678 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80c6b259-7f53-44bc-9230-adeacd7d9cf6-kube-api-access-fkn2q" (OuterVolumeSpecName: "kube-api-access-fkn2q") pod "80c6b259-7f53-44bc-9230-adeacd7d9cf6" (UID: "80c6b259-7f53-44bc-9230-adeacd7d9cf6"). InnerVolumeSpecName "kube-api-access-fkn2q". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:47:33 crc kubenswrapper[4751]: I0227 16:47:33.572569 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80c6b259-7f53-44bc-9230-adeacd7d9cf6-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "80c6b259-7f53-44bc-9230-adeacd7d9cf6" (UID: "80c6b259-7f53-44bc-9230-adeacd7d9cf6"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:33 crc kubenswrapper[4751]: I0227 16:47:33.611441 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-nq6b4"] Feb 27 16:47:33 crc kubenswrapper[4751]: I0227 16:47:33.611783 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" podUID="e72931dc-c81e-4f44-8e6b-72fab4e429b4" containerName="dnsmasq-dns" containerID="cri-o://56831ba4dd315976c6290526492af98a99941ca79611171f8debd770c9e4e37c" gracePeriod=10 Feb 27 16:47:33 crc kubenswrapper[4751]: I0227 16:47:33.626047 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80c6b259-7f53-44bc-9230-adeacd7d9cf6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "80c6b259-7f53-44bc-9230-adeacd7d9cf6" (UID: "80c6b259-7f53-44bc-9230-adeacd7d9cf6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:33 crc kubenswrapper[4751]: I0227 16:47:33.664628 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fkn2q\" (UniqueName: \"kubernetes.io/projected/80c6b259-7f53-44bc-9230-adeacd7d9cf6-kube-api-access-fkn2q\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:33 crc kubenswrapper[4751]: I0227 16:47:33.664659 4751 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/80c6b259-7f53-44bc-9230-adeacd7d9cf6-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:33 crc kubenswrapper[4751]: I0227 16:47:33.664668 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80c6b259-7f53-44bc-9230-adeacd7d9cf6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:33 crc kubenswrapper[4751]: E0227 16:47:33.800030 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="5bba51b8-4ef1-418b-86b4-59e9e52a6cac" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.050883 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-vgcdl" event={"ID":"80c6b259-7f53-44bc-9230-adeacd7d9cf6","Type":"ContainerDied","Data":"90670478599c3af368915df14832459d1b379ff067034f5d76556cccab9911e6"} Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.050932 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="90670478599c3af368915df14832459d1b379ff067034f5d76556cccab9911e6" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.051109 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-vgcdl" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.053658 4751 generic.go:334] "Generic (PLEG): container finished" podID="e72931dc-c81e-4f44-8e6b-72fab4e429b4" containerID="56831ba4dd315976c6290526492af98a99941ca79611171f8debd770c9e4e37c" exitCode=0 Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.053739 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" event={"ID":"e72931dc-c81e-4f44-8e6b-72fab4e429b4","Type":"ContainerDied","Data":"56831ba4dd315976c6290526492af98a99941ca79611171f8debd770c9e4e37c"} Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.056940 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5bba51b8-4ef1-418b-86b4-59e9e52a6cac","Type":"ContainerStarted","Data":"ff10e9a39d201c08944bf42f6a1cda2a5af9b2b667c51f4789f57da8335cdf12"} Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.057124 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5bba51b8-4ef1-418b-86b4-59e9e52a6cac" containerName="ceilometer-notification-agent" containerID="cri-o://f27e5eaa429e78791aca833690688958b2eac18b004d3f3ef116330e59f33271" gracePeriod=30 Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.057172 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.057242 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5bba51b8-4ef1-418b-86b4-59e9e52a6cac" containerName="proxy-httpd" containerID="cri-o://ff10e9a39d201c08944bf42f6a1cda2a5af9b2b667c51f4789f57da8335cdf12" gracePeriod=30 Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.057291 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5bba51b8-4ef1-418b-86b4-59e9e52a6cac" containerName="sg-core" containerID="cri-o://34e35acdba06ac6dd6527ab73950f94be57c05bc13e712a4e351b33f56a39698" gracePeriod=30 Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.084177 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.173602 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-dns-swift-storage-0\") pod \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.173856 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-ovsdbserver-nb\") pod \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.173981 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-dns-svc\") pod \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.174088 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-config\") pod \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.174195 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-ovsdbserver-sb\") pod \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.174335 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2dmpn\" (UniqueName: \"kubernetes.io/projected/e72931dc-c81e-4f44-8e6b-72fab4e429b4-kube-api-access-2dmpn\") pod \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\" (UID: \"e72931dc-c81e-4f44-8e6b-72fab4e429b4\") " Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.185935 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e72931dc-c81e-4f44-8e6b-72fab4e429b4-kube-api-access-2dmpn" (OuterVolumeSpecName: "kube-api-access-2dmpn") pod "e72931dc-c81e-4f44-8e6b-72fab4e429b4" (UID: "e72931dc-c81e-4f44-8e6b-72fab4e429b4"). InnerVolumeSpecName "kube-api-access-2dmpn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.236819 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e72931dc-c81e-4f44-8e6b-72fab4e429b4" (UID: "e72931dc-c81e-4f44-8e6b-72fab4e429b4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.244990 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e72931dc-c81e-4f44-8e6b-72fab4e429b4" (UID: "e72931dc-c81e-4f44-8e6b-72fab4e429b4"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.245095 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e72931dc-c81e-4f44-8e6b-72fab4e429b4" (UID: "e72931dc-c81e-4f44-8e6b-72fab4e429b4"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.261415 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e72931dc-c81e-4f44-8e6b-72fab4e429b4" (UID: "e72931dc-c81e-4f44-8e6b-72fab4e429b4"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.276564 4751 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.282382 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.282425 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2dmpn\" (UniqueName: \"kubernetes.io/projected/e72931dc-c81e-4f44-8e6b-72fab4e429b4-kube-api-access-2dmpn\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.282436 4751 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.288181 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-config" (OuterVolumeSpecName: "config") pod "e72931dc-c81e-4f44-8e6b-72fab4e429b4" (UID: "e72931dc-c81e-4f44-8e6b-72fab4e429b4"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.336495 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-d9bcd5f6c-zlj75"] Feb 27 16:47:34 crc kubenswrapper[4751]: E0227 16:47:34.336943 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e72931dc-c81e-4f44-8e6b-72fab4e429b4" containerName="init" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.336971 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="e72931dc-c81e-4f44-8e6b-72fab4e429b4" containerName="init" Feb 27 16:47:34 crc kubenswrapper[4751]: E0227 16:47:34.336998 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e72931dc-c81e-4f44-8e6b-72fab4e429b4" containerName="dnsmasq-dns" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.337004 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="e72931dc-c81e-4f44-8e6b-72fab4e429b4" containerName="dnsmasq-dns" Feb 27 16:47:34 crc kubenswrapper[4751]: E0227 16:47:34.337029 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80c6b259-7f53-44bc-9230-adeacd7d9cf6" containerName="barbican-db-sync" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.337037 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="80c6b259-7f53-44bc-9230-adeacd7d9cf6" containerName="barbican-db-sync" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.337216 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="e72931dc-c81e-4f44-8e6b-72fab4e429b4" containerName="dnsmasq-dns" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.337244 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="80c6b259-7f53-44bc-9230-adeacd7d9cf6" containerName="barbican-db-sync" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.338233 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-d9bcd5f6c-zlj75" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.341175 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-p4xqw" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.341367 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.341639 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.344973 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-d9bcd5f6c-zlj75"] Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.385483 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.385522 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e72931dc-c81e-4f44-8e6b-72fab4e429b4-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.394851 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-55c754cd9d-n8xn9"] Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.396218 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.398536 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.467983 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-5kmmx"] Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.469525 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.482288 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-55c754cd9d-n8xn9"] Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.487027 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9f1619e-893b-4f17-b105-214ccbf6385e-combined-ca-bundle\") pod \"barbican-keystone-listener-55c754cd9d-n8xn9\" (UID: \"a9f1619e-893b-4f17-b105-214ccbf6385e\") " pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.487115 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a0840d34-f0f3-4bfd-a33c-29cc1e268586-config-data-custom\") pod \"barbican-worker-d9bcd5f6c-zlj75\" (UID: \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\") " pod="openstack/barbican-worker-d9bcd5f6c-zlj75" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.487147 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6lkxn\" (UniqueName: \"kubernetes.io/projected/a0840d34-f0f3-4bfd-a33c-29cc1e268586-kube-api-access-6lkxn\") pod \"barbican-worker-d9bcd5f6c-zlj75\" (UID: \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\") " pod="openstack/barbican-worker-d9bcd5f6c-zlj75" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.487188 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0840d34-f0f3-4bfd-a33c-29cc1e268586-config-data\") pod \"barbican-worker-d9bcd5f6c-zlj75\" (UID: \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\") " pod="openstack/barbican-worker-d9bcd5f6c-zlj75" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.487219 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9f1619e-893b-4f17-b105-214ccbf6385e-config-data\") pod \"barbican-keystone-listener-55c754cd9d-n8xn9\" (UID: \"a9f1619e-893b-4f17-b105-214ccbf6385e\") " pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.487242 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cn582\" (UniqueName: \"kubernetes.io/projected/a9f1619e-893b-4f17-b105-214ccbf6385e-kube-api-access-cn582\") pod \"barbican-keystone-listener-55c754cd9d-n8xn9\" (UID: \"a9f1619e-893b-4f17-b105-214ccbf6385e\") " pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.487296 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/a0840d34-f0f3-4bfd-a33c-29cc1e268586-combined-ca-bundle\") pod \"barbican-worker-d9bcd5f6c-zlj75\" (UID: \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\") " pod="openstack/barbican-worker-d9bcd5f6c-zlj75" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.487314 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9f1619e-893b-4f17-b105-214ccbf6385e-logs\") pod \"barbican-keystone-listener-55c754cd9d-n8xn9\" (UID: \"a9f1619e-893b-4f17-b105-214ccbf6385e\") " pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.487346 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a0840d34-f0f3-4bfd-a33c-29cc1e268586-logs\") pod \"barbican-worker-d9bcd5f6c-zlj75\" (UID: \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\") " pod="openstack/barbican-worker-d9bcd5f6c-zlj75" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.487359 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a9f1619e-893b-4f17-b105-214ccbf6385e-config-data-custom\") pod \"barbican-keystone-listener-55c754cd9d-n8xn9\" (UID: \"a9f1619e-893b-4f17-b105-214ccbf6385e\") " pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.492607 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-5kmmx"] Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.568391 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-7659f547c6-hgdwt"] Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.570458 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7659f547c6-hgdwt" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.573084 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.589007 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a0840d34-f0f3-4bfd-a33c-29cc1e268586-config-data-custom\") pod \"barbican-worker-d9bcd5f6c-zlj75\" (UID: \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\") " pod="openstack/barbican-worker-d9bcd5f6c-zlj75" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.589075 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6lkxn\" (UniqueName: \"kubernetes.io/projected/a0840d34-f0f3-4bfd-a33c-29cc1e268586-kube-api-access-6lkxn\") pod \"barbican-worker-d9bcd5f6c-zlj75\" (UID: \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\") " pod="openstack/barbican-worker-d9bcd5f6c-zlj75" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.589135 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0840d34-f0f3-4bfd-a33c-29cc1e268586-config-data\") pod \"barbican-worker-d9bcd5f6c-zlj75\" (UID: \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\") " pod="openstack/barbican-worker-d9bcd5f6c-zlj75" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.589166 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9f1619e-893b-4f17-b105-214ccbf6385e-config-data\") pod \"barbican-keystone-listener-55c754cd9d-n8xn9\" (UID: \"a9f1619e-893b-4f17-b105-214ccbf6385e\") " pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.589211 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cn582\" (UniqueName: \"kubernetes.io/projected/a9f1619e-893b-4f17-b105-214ccbf6385e-kube-api-access-cn582\") pod \"barbican-keystone-listener-55c754cd9d-n8xn9\" (UID: \"a9f1619e-893b-4f17-b105-214ccbf6385e\") " pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.589261 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgtnm\" (UniqueName: \"kubernetes.io/projected/7419cb2d-20ce-4408-ad22-15e818562876-kube-api-access-cgtnm\") pod \"dnsmasq-dns-85ff748b95-5kmmx\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.589290 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-5kmmx\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.589380 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0840d34-f0f3-4bfd-a33c-29cc1e268586-combined-ca-bundle\") pod \"barbican-worker-d9bcd5f6c-zlj75\" (UID: \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\") " pod="openstack/barbican-worker-d9bcd5f6c-zlj75" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 
16:47:34.589420 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9f1619e-893b-4f17-b105-214ccbf6385e-logs\") pod \"barbican-keystone-listener-55c754cd9d-n8xn9\" (UID: \"a9f1619e-893b-4f17-b105-214ccbf6385e\") " pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.589460 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-5kmmx\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.589493 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-config\") pod \"dnsmasq-dns-85ff748b95-5kmmx\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.589516 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-dns-svc\") pod \"dnsmasq-dns-85ff748b95-5kmmx\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.594547 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9f1619e-893b-4f17-b105-214ccbf6385e-logs\") pod \"barbican-keystone-listener-55c754cd9d-n8xn9\" (UID: \"a9f1619e-893b-4f17-b105-214ccbf6385e\") " pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.594457 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a0840d34-f0f3-4bfd-a33c-29cc1e268586-logs\") pod \"barbican-worker-d9bcd5f6c-zlj75\" (UID: \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\") " pod="openstack/barbican-worker-d9bcd5f6c-zlj75" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.594626 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a9f1619e-893b-4f17-b105-214ccbf6385e-config-data-custom\") pod \"barbican-keystone-listener-55c754cd9d-n8xn9\" (UID: \"a9f1619e-893b-4f17-b105-214ccbf6385e\") " pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.594986 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a0840d34-f0f3-4bfd-a33c-29cc1e268586-logs\") pod \"barbican-worker-d9bcd5f6c-zlj75\" (UID: \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\") " pod="openstack/barbican-worker-d9bcd5f6c-zlj75" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.595221 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9f1619e-893b-4f17-b105-214ccbf6385e-combined-ca-bundle\") pod \"barbican-keystone-listener-55c754cd9d-n8xn9\" (UID: \"a9f1619e-893b-4f17-b105-214ccbf6385e\") " pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" Feb 27 16:47:34 crc 
kubenswrapper[4751]: I0227 16:47:34.595257 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-5kmmx\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.616368 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9f1619e-893b-4f17-b105-214ccbf6385e-combined-ca-bundle\") pod \"barbican-keystone-listener-55c754cd9d-n8xn9\" (UID: \"a9f1619e-893b-4f17-b105-214ccbf6385e\") " pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.616541 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a9f1619e-893b-4f17-b105-214ccbf6385e-config-data-custom\") pod \"barbican-keystone-listener-55c754cd9d-n8xn9\" (UID: \"a9f1619e-893b-4f17-b105-214ccbf6385e\") " pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.616920 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0840d34-f0f3-4bfd-a33c-29cc1e268586-config-data\") pod \"barbican-worker-d9bcd5f6c-zlj75\" (UID: \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\") " pod="openstack/barbican-worker-d9bcd5f6c-zlj75" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.617300 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0840d34-f0f3-4bfd-a33c-29cc1e268586-combined-ca-bundle\") pod \"barbican-worker-d9bcd5f6c-zlj75\" (UID: \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\") " pod="openstack/barbican-worker-d9bcd5f6c-zlj75" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.617745 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9f1619e-893b-4f17-b105-214ccbf6385e-config-data\") pod \"barbican-keystone-listener-55c754cd9d-n8xn9\" (UID: \"a9f1619e-893b-4f17-b105-214ccbf6385e\") " pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.617905 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a0840d34-f0f3-4bfd-a33c-29cc1e268586-config-data-custom\") pod \"barbican-worker-d9bcd5f6c-zlj75\" (UID: \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\") " pod="openstack/barbican-worker-d9bcd5f6c-zlj75" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.620518 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cn582\" (UniqueName: \"kubernetes.io/projected/a9f1619e-893b-4f17-b105-214ccbf6385e-kube-api-access-cn582\") pod \"barbican-keystone-listener-55c754cd9d-n8xn9\" (UID: \"a9f1619e-893b-4f17-b105-214ccbf6385e\") " pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.620579 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7659f547c6-hgdwt"] Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.667991 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6lkxn\" 
(UniqueName: \"kubernetes.io/projected/a0840d34-f0f3-4bfd-a33c-29cc1e268586-kube-api-access-6lkxn\") pod \"barbican-worker-d9bcd5f6c-zlj75\" (UID: \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\") " pod="openstack/barbican-worker-d9bcd5f6c-zlj75" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.697440 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e512657-d0e1-4289-b430-0fc78d20aca7-config-data\") pod \"barbican-api-7659f547c6-hgdwt\" (UID: \"1e512657-d0e1-4289-b430-0fc78d20aca7\") " pod="openstack/barbican-api-7659f547c6-hgdwt" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.697504 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-5kmmx\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.697527 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-config\") pod \"dnsmasq-dns-85ff748b95-5kmmx\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.697549 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-dns-svc\") pod \"dnsmasq-dns-85ff748b95-5kmmx\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.697585 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-5kmmx\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.697624 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e512657-d0e1-4289-b430-0fc78d20aca7-combined-ca-bundle\") pod \"barbican-api-7659f547c6-hgdwt\" (UID: \"1e512657-d0e1-4289-b430-0fc78d20aca7\") " pod="openstack/barbican-api-7659f547c6-hgdwt" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.697663 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tr2j\" (UniqueName: \"kubernetes.io/projected/1e512657-d0e1-4289-b430-0fc78d20aca7-kube-api-access-6tr2j\") pod \"barbican-api-7659f547c6-hgdwt\" (UID: \"1e512657-d0e1-4289-b430-0fc78d20aca7\") " pod="openstack/barbican-api-7659f547c6-hgdwt" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.697798 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1e512657-d0e1-4289-b430-0fc78d20aca7-config-data-custom\") pod \"barbican-api-7659f547c6-hgdwt\" (UID: \"1e512657-d0e1-4289-b430-0fc78d20aca7\") " pod="openstack/barbican-api-7659f547c6-hgdwt" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.697860 4751 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-cgtnm\" (UniqueName: \"kubernetes.io/projected/7419cb2d-20ce-4408-ad22-15e818562876-kube-api-access-cgtnm\") pod \"dnsmasq-dns-85ff748b95-5kmmx\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.697888 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-5kmmx\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.697908 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e512657-d0e1-4289-b430-0fc78d20aca7-logs\") pod \"barbican-api-7659f547c6-hgdwt\" (UID: \"1e512657-d0e1-4289-b430-0fc78d20aca7\") " pod="openstack/barbican-api-7659f547c6-hgdwt" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.699232 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-5kmmx\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.699611 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-config\") pod \"dnsmasq-dns-85ff748b95-5kmmx\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.700100 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-5kmmx\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.700351 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-dns-svc\") pod \"dnsmasq-dns-85ff748b95-5kmmx\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.700813 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-d9bcd5f6c-zlj75" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.701753 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-5kmmx\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.718253 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgtnm\" (UniqueName: \"kubernetes.io/projected/7419cb2d-20ce-4408-ad22-15e818562876-kube-api-access-cgtnm\") pod \"dnsmasq-dns-85ff748b95-5kmmx\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.753183 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.801523 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1e512657-d0e1-4289-b430-0fc78d20aca7-config-data-custom\") pod \"barbican-api-7659f547c6-hgdwt\" (UID: \"1e512657-d0e1-4289-b430-0fc78d20aca7\") " pod="openstack/barbican-api-7659f547c6-hgdwt" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.801634 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e512657-d0e1-4289-b430-0fc78d20aca7-logs\") pod \"barbican-api-7659f547c6-hgdwt\" (UID: \"1e512657-d0e1-4289-b430-0fc78d20aca7\") " pod="openstack/barbican-api-7659f547c6-hgdwt" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.801690 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e512657-d0e1-4289-b430-0fc78d20aca7-config-data\") pod \"barbican-api-7659f547c6-hgdwt\" (UID: \"1e512657-d0e1-4289-b430-0fc78d20aca7\") " pod="openstack/barbican-api-7659f547c6-hgdwt" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.801789 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e512657-d0e1-4289-b430-0fc78d20aca7-combined-ca-bundle\") pod \"barbican-api-7659f547c6-hgdwt\" (UID: \"1e512657-d0e1-4289-b430-0fc78d20aca7\") " pod="openstack/barbican-api-7659f547c6-hgdwt" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.801833 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6tr2j\" (UniqueName: \"kubernetes.io/projected/1e512657-d0e1-4289-b430-0fc78d20aca7-kube-api-access-6tr2j\") pod \"barbican-api-7659f547c6-hgdwt\" (UID: \"1e512657-d0e1-4289-b430-0fc78d20aca7\") " pod="openstack/barbican-api-7659f547c6-hgdwt" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.804357 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e512657-d0e1-4289-b430-0fc78d20aca7-logs\") pod \"barbican-api-7659f547c6-hgdwt\" (UID: \"1e512657-d0e1-4289-b430-0fc78d20aca7\") " pod="openstack/barbican-api-7659f547c6-hgdwt" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.805960 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.808211 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1e512657-d0e1-4289-b430-0fc78d20aca7-config-data-custom\") pod \"barbican-api-7659f547c6-hgdwt\" (UID: \"1e512657-d0e1-4289-b430-0fc78d20aca7\") " pod="openstack/barbican-api-7659f547c6-hgdwt" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.810864 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e512657-d0e1-4289-b430-0fc78d20aca7-config-data\") pod \"barbican-api-7659f547c6-hgdwt\" (UID: \"1e512657-d0e1-4289-b430-0fc78d20aca7\") " pod="openstack/barbican-api-7659f547c6-hgdwt" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.812935 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e512657-d0e1-4289-b430-0fc78d20aca7-combined-ca-bundle\") pod \"barbican-api-7659f547c6-hgdwt\" (UID: \"1e512657-d0e1-4289-b430-0fc78d20aca7\") " pod="openstack/barbican-api-7659f547c6-hgdwt" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.821446 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6tr2j\" (UniqueName: \"kubernetes.io/projected/1e512657-d0e1-4289-b430-0fc78d20aca7-kube-api-access-6tr2j\") pod \"barbican-api-7659f547c6-hgdwt\" (UID: \"1e512657-d0e1-4289-b430-0fc78d20aca7\") " pod="openstack/barbican-api-7659f547c6-hgdwt" Feb 27 16:47:34 crc kubenswrapper[4751]: I0227 16:47:34.901801 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7659f547c6-hgdwt" Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.088028 4751 generic.go:334] "Generic (PLEG): container finished" podID="5bba51b8-4ef1-418b-86b4-59e9e52a6cac" containerID="ff10e9a39d201c08944bf42f6a1cda2a5af9b2b667c51f4789f57da8335cdf12" exitCode=0 Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.088082 4751 generic.go:334] "Generic (PLEG): container finished" podID="5bba51b8-4ef1-418b-86b4-59e9e52a6cac" containerID="34e35acdba06ac6dd6527ab73950f94be57c05bc13e712a4e351b33f56a39698" exitCode=2 Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.088200 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5bba51b8-4ef1-418b-86b4-59e9e52a6cac","Type":"ContainerDied","Data":"ff10e9a39d201c08944bf42f6a1cda2a5af9b2b667c51f4789f57da8335cdf12"} Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.088230 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5bba51b8-4ef1-418b-86b4-59e9e52a6cac","Type":"ContainerDied","Data":"34e35acdba06ac6dd6527ab73950f94be57c05bc13e712a4e351b33f56a39698"} Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.097319 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" event={"ID":"e72931dc-c81e-4f44-8e6b-72fab4e429b4","Type":"ContainerDied","Data":"b2b15e34fd57e45c1867b44456c2b3b70031ad1084d093537ea54ff48542faa2"} Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.097385 4751 scope.go:117] "RemoveContainer" containerID="56831ba4dd315976c6290526492af98a99941ca79611171f8debd770c9e4e37c" Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.097582 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.136740 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-nq6b4"] Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.136760 4751 scope.go:117] "RemoveContainer" containerID="a5cb05deb68bba72a0d13b365474189ae8b4505f7f791a7924ae015760498180" Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.145821 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-nq6b4"] Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.222007 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-d9bcd5f6c-zlj75"] Feb 27 16:47:35 crc kubenswrapper[4751]: W0227 16:47:35.236867 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda0840d34_f0f3_4bfd_a33c_29cc1e268586.slice/crio-0751aa2cf43b9b59cd932572a646b4b5fd97dcd04a31d419cf30b1b84ab7296a WatchSource:0}: Error finding container 0751aa2cf43b9b59cd932572a646b4b5fd97dcd04a31d419cf30b1b84ab7296a: Status 404 returned error can't find the container with id 0751aa2cf43b9b59cd932572a646b4b5fd97dcd04a31d419cf30b1b84ab7296a Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.384354 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-5kmmx"] Feb 27 16:47:35 crc kubenswrapper[4751]: W0227 16:47:35.494536 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda9f1619e_893b_4f17_b105_214ccbf6385e.slice/crio-e9b0e432779ddc335de600234e8d942838cb5862923c3006c0c119578f46d441 WatchSource:0}: Error finding container e9b0e432779ddc335de600234e8d942838cb5862923c3006c0c119578f46d441: Status 404 returned error can't find the container with id e9b0e432779ddc335de600234e8d942838cb5862923c3006c0c119578f46d441 Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.497776 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-55c754cd9d-n8xn9"] Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.579038 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.582898 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7659f547c6-hgdwt"] Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.726392 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wjtm2\" (UniqueName: \"kubernetes.io/projected/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-kube-api-access-wjtm2\") pod \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.726543 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-log-httpd\") pod \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.726574 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-config-data\") pod \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.726603 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-scripts\") pod \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.726633 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-sg-core-conf-yaml\") pod \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.726715 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-combined-ca-bundle\") pod \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.726792 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-run-httpd\") pod \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\" (UID: \"5bba51b8-4ef1-418b-86b4-59e9e52a6cac\") " Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.727253 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5bba51b8-4ef1-418b-86b4-59e9e52a6cac" (UID: "5bba51b8-4ef1-418b-86b4-59e9e52a6cac"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.727832 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5bba51b8-4ef1-418b-86b4-59e9e52a6cac" (UID: "5bba51b8-4ef1-418b-86b4-59e9e52a6cac"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.728168 4751 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.728187 4751 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.731604 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-kube-api-access-wjtm2" (OuterVolumeSpecName: "kube-api-access-wjtm2") pod "5bba51b8-4ef1-418b-86b4-59e9e52a6cac" (UID: "5bba51b8-4ef1-418b-86b4-59e9e52a6cac"). InnerVolumeSpecName "kube-api-access-wjtm2". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.731944 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-scripts" (OuterVolumeSpecName: "scripts") pod "5bba51b8-4ef1-418b-86b4-59e9e52a6cac" (UID: "5bba51b8-4ef1-418b-86b4-59e9e52a6cac"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.766370 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "5bba51b8-4ef1-418b-86b4-59e9e52a6cac" (UID: "5bba51b8-4ef1-418b-86b4-59e9e52a6cac"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.817776 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5bba51b8-4ef1-418b-86b4-59e9e52a6cac" (UID: "5bba51b8-4ef1-418b-86b4-59e9e52a6cac"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.819479 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-config-data" (OuterVolumeSpecName: "config-data") pod "5bba51b8-4ef1-418b-86b4-59e9e52a6cac" (UID: "5bba51b8-4ef1-418b-86b4-59e9e52a6cac"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.829531 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.829571 4751 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.829586 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.829598 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wjtm2\" (UniqueName: \"kubernetes.io/projected/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-kube-api-access-wjtm2\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:35 crc kubenswrapper[4751]: I0227 16:47:35.829608 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5bba51b8-4ef1-418b-86b4-59e9e52a6cac-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.107655 4751 generic.go:334] "Generic (PLEG): container finished" podID="f5af617b-32bc-43a9-a8e0-6bb1fec1b4df" containerID="ccf6a1800c30538fc57cee8fb0f6c11241c62e6a5663f569cec7197d8da7be0d" exitCode=0 Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.107737 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-6s84l" event={"ID":"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df","Type":"ContainerDied","Data":"ccf6a1800c30538fc57cee8fb0f6c11241c62e6a5663f569cec7197d8da7be0d"} Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.110585 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7659f547c6-hgdwt" event={"ID":"1e512657-d0e1-4289-b430-0fc78d20aca7","Type":"ContainerStarted","Data":"e185e017aabd6eea6f75a03f5e4ac18ff4ed67ec022a4811b85b1795a1c5cb13"} Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.110626 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7659f547c6-hgdwt" event={"ID":"1e512657-d0e1-4289-b430-0fc78d20aca7","Type":"ContainerStarted","Data":"cd27bf82eb04cb3fddf318ca5f710fddb5fc8873e483c05ec4bacbc336c6ba0a"} Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.110643 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7659f547c6-hgdwt" event={"ID":"1e512657-d0e1-4289-b430-0fc78d20aca7","Type":"ContainerStarted","Data":"e4b919c61a8f254548b3a9b67273690bde47ef59a01a22ac35627a04a8753f6d"} Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.110855 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7659f547c6-hgdwt" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.110892 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7659f547c6-hgdwt" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.114872 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" 
event={"ID":"a9f1619e-893b-4f17-b105-214ccbf6385e","Type":"ContainerStarted","Data":"e9b0e432779ddc335de600234e8d942838cb5862923c3006c0c119578f46d441"} Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.118235 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-d9bcd5f6c-zlj75" event={"ID":"a0840d34-f0f3-4bfd-a33c-29cc1e268586","Type":"ContainerStarted","Data":"0751aa2cf43b9b59cd932572a646b4b5fd97dcd04a31d419cf30b1b84ab7296a"} Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.121454 4751 generic.go:334] "Generic (PLEG): container finished" podID="7419cb2d-20ce-4408-ad22-15e818562876" containerID="b68e71165a27e0158a3056c575d4e6024f19287ec939a8079851cfa42d2c79ed" exitCode=0 Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.121526 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" event={"ID":"7419cb2d-20ce-4408-ad22-15e818562876","Type":"ContainerDied","Data":"b68e71165a27e0158a3056c575d4e6024f19287ec939a8079851cfa42d2c79ed"} Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.121553 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" event={"ID":"7419cb2d-20ce-4408-ad22-15e818562876","Type":"ContainerStarted","Data":"bcd2ad518d40564a5de80f74340eff727b426612b4709142be18da0cd7c3b621"} Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.126832 4751 generic.go:334] "Generic (PLEG): container finished" podID="5bba51b8-4ef1-418b-86b4-59e9e52a6cac" containerID="f27e5eaa429e78791aca833690688958b2eac18b004d3f3ef116330e59f33271" exitCode=0 Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.126878 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5bba51b8-4ef1-418b-86b4-59e9e52a6cac","Type":"ContainerDied","Data":"f27e5eaa429e78791aca833690688958b2eac18b004d3f3ef116330e59f33271"} Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.126906 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5bba51b8-4ef1-418b-86b4-59e9e52a6cac","Type":"ContainerDied","Data":"b8ba57b901623b4cd8a77d53c8ca19d66768deae1afb88d5fb08bb1a75c853c8"} Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.126927 4751 scope.go:117] "RemoveContainer" containerID="ff10e9a39d201c08944bf42f6a1cda2a5af9b2b667c51f4789f57da8335cdf12" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.127028 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.155138 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-7659f547c6-hgdwt" podStartSLOduration=2.155114966 podStartE2EDuration="2.155114966s" podCreationTimestamp="2026-02-27 16:47:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:47:36.14666204 +0000 UTC m=+1418.293676477" watchObservedRunningTime="2026-02-27 16:47:36.155114966 +0000 UTC m=+1418.302129413" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.209803 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.229208 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.241869 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:47:36 crc kubenswrapper[4751]: E0227 16:47:36.242340 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bba51b8-4ef1-418b-86b4-59e9e52a6cac" containerName="ceilometer-notification-agent" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.242357 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bba51b8-4ef1-418b-86b4-59e9e52a6cac" containerName="ceilometer-notification-agent" Feb 27 16:47:36 crc kubenswrapper[4751]: E0227 16:47:36.242726 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bba51b8-4ef1-418b-86b4-59e9e52a6cac" containerName="proxy-httpd" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.242797 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bba51b8-4ef1-418b-86b4-59e9e52a6cac" containerName="proxy-httpd" Feb 27 16:47:36 crc kubenswrapper[4751]: E0227 16:47:36.242819 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bba51b8-4ef1-418b-86b4-59e9e52a6cac" containerName="sg-core" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.242827 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bba51b8-4ef1-418b-86b4-59e9e52a6cac" containerName="sg-core" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.243081 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="5bba51b8-4ef1-418b-86b4-59e9e52a6cac" containerName="sg-core" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.243123 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="5bba51b8-4ef1-418b-86b4-59e9e52a6cac" containerName="ceilometer-notification-agent" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.243136 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="5bba51b8-4ef1-418b-86b4-59e9e52a6cac" containerName="proxy-httpd" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.249692 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.253605 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.253852 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.256202 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.340608 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-config-data\") pod \"ceilometer-0\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.340887 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.340992 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cd8468ed-ddbc-411a-9d7c-931e4962aed7-log-httpd\") pod \"ceilometer-0\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.341201 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-scripts\") pod \"ceilometer-0\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.341383 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cd8468ed-ddbc-411a-9d7c-931e4962aed7-run-httpd\") pod \"ceilometer-0\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.341506 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-662jh\" (UniqueName: \"kubernetes.io/projected/cd8468ed-ddbc-411a-9d7c-931e4962aed7-kube-api-access-662jh\") pod \"ceilometer-0\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.341591 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.443516 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cd8468ed-ddbc-411a-9d7c-931e4962aed7-run-httpd\") pod \"ceilometer-0\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.443572 4751 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-662jh\" (UniqueName: \"kubernetes.io/projected/cd8468ed-ddbc-411a-9d7c-931e4962aed7-kube-api-access-662jh\") pod \"ceilometer-0\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.443592 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.443671 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-config-data\") pod \"ceilometer-0\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.443686 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.443708 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-scripts\") pod \"ceilometer-0\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.443722 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cd8468ed-ddbc-411a-9d7c-931e4962aed7-log-httpd\") pod \"ceilometer-0\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.444156 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cd8468ed-ddbc-411a-9d7c-931e4962aed7-log-httpd\") pod \"ceilometer-0\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.444366 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cd8468ed-ddbc-411a-9d7c-931e4962aed7-run-httpd\") pod \"ceilometer-0\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.454452 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.455270 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-scripts\") pod \"ceilometer-0\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.456575 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.473826 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-config-data\") pod \"ceilometer-0\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.487140 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-662jh\" (UniqueName: \"kubernetes.io/projected/cd8468ed-ddbc-411a-9d7c-931e4962aed7-kube-api-access-662jh\") pod \"ceilometer-0\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.531687 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5bba51b8-4ef1-418b-86b4-59e9e52a6cac" path="/var/lib/kubelet/pods/5bba51b8-4ef1-418b-86b4-59e9e52a6cac/volumes" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.532798 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e72931dc-c81e-4f44-8e6b-72fab4e429b4" path="/var/lib/kubelet/pods/e72931dc-c81e-4f44-8e6b-72fab4e429b4/volumes" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.612364 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.643846 4751 scope.go:117] "RemoveContainer" containerID="34e35acdba06ac6dd6527ab73950f94be57c05bc13e712a4e351b33f56a39698" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.757285 4751 scope.go:117] "RemoveContainer" containerID="f27e5eaa429e78791aca833690688958b2eac18b004d3f3ef116330e59f33271" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.789667 4751 scope.go:117] "RemoveContainer" containerID="ff10e9a39d201c08944bf42f6a1cda2a5af9b2b667c51f4789f57da8335cdf12" Feb 27 16:47:36 crc kubenswrapper[4751]: E0227 16:47:36.790138 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff10e9a39d201c08944bf42f6a1cda2a5af9b2b667c51f4789f57da8335cdf12\": container with ID starting with ff10e9a39d201c08944bf42f6a1cda2a5af9b2b667c51f4789f57da8335cdf12 not found: ID does not exist" containerID="ff10e9a39d201c08944bf42f6a1cda2a5af9b2b667c51f4789f57da8335cdf12" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.790185 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff10e9a39d201c08944bf42f6a1cda2a5af9b2b667c51f4789f57da8335cdf12"} err="failed to get container status \"ff10e9a39d201c08944bf42f6a1cda2a5af9b2b667c51f4789f57da8335cdf12\": rpc error: code = NotFound desc = could not find container \"ff10e9a39d201c08944bf42f6a1cda2a5af9b2b667c51f4789f57da8335cdf12\": container with ID starting with ff10e9a39d201c08944bf42f6a1cda2a5af9b2b667c51f4789f57da8335cdf12 not found: ID does not exist" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.790214 4751 scope.go:117] "RemoveContainer" containerID="34e35acdba06ac6dd6527ab73950f94be57c05bc13e712a4e351b33f56a39698" Feb 27 16:47:36 crc kubenswrapper[4751]: E0227 16:47:36.790592 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find 
container \"34e35acdba06ac6dd6527ab73950f94be57c05bc13e712a4e351b33f56a39698\": container with ID starting with 34e35acdba06ac6dd6527ab73950f94be57c05bc13e712a4e351b33f56a39698 not found: ID does not exist" containerID="34e35acdba06ac6dd6527ab73950f94be57c05bc13e712a4e351b33f56a39698" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.790627 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34e35acdba06ac6dd6527ab73950f94be57c05bc13e712a4e351b33f56a39698"} err="failed to get container status \"34e35acdba06ac6dd6527ab73950f94be57c05bc13e712a4e351b33f56a39698\": rpc error: code = NotFound desc = could not find container \"34e35acdba06ac6dd6527ab73950f94be57c05bc13e712a4e351b33f56a39698\": container with ID starting with 34e35acdba06ac6dd6527ab73950f94be57c05bc13e712a4e351b33f56a39698 not found: ID does not exist" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.790658 4751 scope.go:117] "RemoveContainer" containerID="f27e5eaa429e78791aca833690688958b2eac18b004d3f3ef116330e59f33271" Feb 27 16:47:36 crc kubenswrapper[4751]: E0227 16:47:36.790861 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f27e5eaa429e78791aca833690688958b2eac18b004d3f3ef116330e59f33271\": container with ID starting with f27e5eaa429e78791aca833690688958b2eac18b004d3f3ef116330e59f33271 not found: ID does not exist" containerID="f27e5eaa429e78791aca833690688958b2eac18b004d3f3ef116330e59f33271" Feb 27 16:47:36 crc kubenswrapper[4751]: I0227 16:47:36.790882 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f27e5eaa429e78791aca833690688958b2eac18b004d3f3ef116330e59f33271"} err="failed to get container status \"f27e5eaa429e78791aca833690688958b2eac18b004d3f3ef116330e59f33271\": rpc error: code = NotFound desc = could not find container \"f27e5eaa429e78791aca833690688958b2eac18b004d3f3ef116330e59f33271\": container with ID starting with f27e5eaa429e78791aca833690688958b2eac18b004d3f3ef116330e59f33271 not found: ID does not exist" Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.166899 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" event={"ID":"7419cb2d-20ce-4408-ad22-15e818562876","Type":"ContainerStarted","Data":"1edc26da90f372a8ea45701f22be030ed5a0542cec1c925afb5fdb2a8dc5a1be"} Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.167839 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.179686 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-d9bcd5f6c-zlj75" event={"ID":"a0840d34-f0f3-4bfd-a33c-29cc1e268586","Type":"ContainerStarted","Data":"42c57c2184b87bf3c9a09b69ad34b6040b0460f9d28d81ed2e8ed7ba354172eb"} Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.207584 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.227738 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" podStartSLOduration=3.227717929 podStartE2EDuration="3.227717929s" podCreationTimestamp="2026-02-27 16:47:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:47:37.219966792 +0000 UTC 
m=+1419.366981239" watchObservedRunningTime="2026-02-27 16:47:37.227717929 +0000 UTC m=+1419.374732376" Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.569486 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-6s84l" Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.679197 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-scripts\") pod \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\" (UID: \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.679334 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-etc-machine-id\") pod \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\" (UID: \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.679374 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6llpc\" (UniqueName: \"kubernetes.io/projected/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-kube-api-access-6llpc\") pod \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\" (UID: \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.679453 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-db-sync-config-data\") pod \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\" (UID: \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.679486 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-combined-ca-bundle\") pod \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\" (UID: \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.679517 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-config-data\") pod \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\" (UID: \"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df\") " Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.680934 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "f5af617b-32bc-43a9-a8e0-6bb1fec1b4df" (UID: "f5af617b-32bc-43a9-a8e0-6bb1fec1b4df"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.702413 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "f5af617b-32bc-43a9-a8e0-6bb1fec1b4df" (UID: "f5af617b-32bc-43a9-a8e0-6bb1fec1b4df"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.702380 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-scripts" (OuterVolumeSpecName: "scripts") pod "f5af617b-32bc-43a9-a8e0-6bb1fec1b4df" (UID: "f5af617b-32bc-43a9-a8e0-6bb1fec1b4df"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.702467 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-kube-api-access-6llpc" (OuterVolumeSpecName: "kube-api-access-6llpc") pod "f5af617b-32bc-43a9-a8e0-6bb1fec1b4df" (UID: "f5af617b-32bc-43a9-a8e0-6bb1fec1b4df"). InnerVolumeSpecName "kube-api-access-6llpc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.716181 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f5af617b-32bc-43a9-a8e0-6bb1fec1b4df" (UID: "f5af617b-32bc-43a9-a8e0-6bb1fec1b4df"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.734533 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-config-data" (OuterVolumeSpecName: "config-data") pod "f5af617b-32bc-43a9-a8e0-6bb1fec1b4df" (UID: "f5af617b-32bc-43a9-a8e0-6bb1fec1b4df"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.781075 4751 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-etc-machine-id\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.781116 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6llpc\" (UniqueName: \"kubernetes.io/projected/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-kube-api-access-6llpc\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.781130 4751 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.781140 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.781151 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.781160 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.990630 4751 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/barbican-api-748c66fdb6-xsx5t"] Feb 27 16:47:37 crc kubenswrapper[4751]: E0227 16:47:37.991318 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5af617b-32bc-43a9-a8e0-6bb1fec1b4df" containerName="cinder-db-sync" Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.991341 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5af617b-32bc-43a9-a8e0-6bb1fec1b4df" containerName="cinder-db-sync" Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.991647 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5af617b-32bc-43a9-a8e0-6bb1fec1b4df" containerName="cinder-db-sync" Feb 27 16:47:37 crc kubenswrapper[4751]: I0227 16:47:37.993015 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.001165 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.002245 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.005033 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-748c66fdb6-xsx5t"] Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.087642 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-config-data-custom\") pod \"barbican-api-748c66fdb6-xsx5t\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.087691 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-combined-ca-bundle\") pod \"barbican-api-748c66fdb6-xsx5t\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.087730 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xctv6\" (UniqueName: \"kubernetes.io/projected/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-kube-api-access-xctv6\") pod \"barbican-api-748c66fdb6-xsx5t\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.087764 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-config-data\") pod \"barbican-api-748c66fdb6-xsx5t\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.087789 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-logs\") pod \"barbican-api-748c66fdb6-xsx5t\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.087878 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-public-tls-certs\") pod \"barbican-api-748c66fdb6-xsx5t\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.087960 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-internal-tls-certs\") pod \"barbican-api-748c66fdb6-xsx5t\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.189777 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-config-data-custom\") pod \"barbican-api-748c66fdb6-xsx5t\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.189846 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-combined-ca-bundle\") pod \"barbican-api-748c66fdb6-xsx5t\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.189881 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xctv6\" (UniqueName: \"kubernetes.io/projected/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-kube-api-access-xctv6\") pod \"barbican-api-748c66fdb6-xsx5t\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.189956 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-config-data\") pod \"barbican-api-748c66fdb6-xsx5t\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.189990 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-logs\") pod \"barbican-api-748c66fdb6-xsx5t\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.190018 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-public-tls-certs\") pod \"barbican-api-748c66fdb6-xsx5t\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.190130 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-internal-tls-certs\") pod \"barbican-api-748c66fdb6-xsx5t\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.190702 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-logs\") pod \"barbican-api-748c66fdb6-xsx5t\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.195843 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-combined-ca-bundle\") pod \"barbican-api-748c66fdb6-xsx5t\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.197861 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-internal-tls-certs\") pod \"barbican-api-748c66fdb6-xsx5t\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.198282 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-config-data-custom\") pod \"barbican-api-748c66fdb6-xsx5t\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.198524 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-config-data\") pod \"barbican-api-748c66fdb6-xsx5t\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.199641 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" event={"ID":"a9f1619e-893b-4f17-b105-214ccbf6385e","Type":"ContainerStarted","Data":"b8d92b7b3132116dd0110c81240f49261aa50bec396280f512da572100bafb6d"} Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.203088 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-d9bcd5f6c-zlj75" event={"ID":"a0840d34-f0f3-4bfd-a33c-29cc1e268586","Type":"ContainerStarted","Data":"72fa8d6456780e0ea6b18871704b4dde8c21cb82793e3e3a0122013c628913fe"} Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.203324 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-public-tls-certs\") pod \"barbican-api-748c66fdb6-xsx5t\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.206606 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cd8468ed-ddbc-411a-9d7c-931e4962aed7","Type":"ContainerStarted","Data":"0e5afbc0f8cf3a0e0d85f77879d2485c73077546e61a791ee8c07cdfe948bba1"} Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.208957 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-6s84l" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.214511 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-6s84l" event={"ID":"f5af617b-32bc-43a9-a8e0-6bb1fec1b4df","Type":"ContainerDied","Data":"895fe98bce37f73a009722c40ac87803ff592a2e5d6f63da4c04560fac1d0b8c"} Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.214556 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="895fe98bce37f73a009722c40ac87803ff592a2e5d6f63da4c04560fac1d0b8c" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.215170 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xctv6\" (UniqueName: \"kubernetes.io/projected/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-kube-api-access-xctv6\") pod \"barbican-api-748c66fdb6-xsx5t\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.220234 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-d9bcd5f6c-zlj75" podStartSLOduration=2.498165919 podStartE2EDuration="4.220219701s" podCreationTimestamp="2026-02-27 16:47:34 +0000 UTC" firstStartedPulling="2026-02-27 16:47:35.238948962 +0000 UTC m=+1417.385963409" lastFinishedPulling="2026-02-27 16:47:36.961002744 +0000 UTC m=+1419.108017191" observedRunningTime="2026-02-27 16:47:38.217689803 +0000 UTC m=+1420.364704250" watchObservedRunningTime="2026-02-27 16:47:38.220219701 +0000 UTC m=+1420.367234148" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.311523 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.400357 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.408394 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.408617 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.430155 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-76bv8" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.430395 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.430577 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.430715 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.499931 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.499976 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vhr2w\" (UniqueName: \"kubernetes.io/projected/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-kube-api-access-vhr2w\") pod \"cinder-scheduler-0\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.500018 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-scripts\") pod \"cinder-scheduler-0\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.500040 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.500131 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.500154 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-config-data\") pod \"cinder-scheduler-0\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.508503 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-5kmmx"] Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.549837 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-d8ldz"] Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.562154 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.584447 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-d8ldz"] Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.607389 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vhr2w\" (UniqueName: \"kubernetes.io/projected/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-kube-api-access-vhr2w\") pod \"cinder-scheduler-0\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.607520 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-scripts\") pod \"cinder-scheduler-0\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.607571 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.607846 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.607895 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-config-data\") pod \"cinder-scheduler-0\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.607935 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.610856 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.627780 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-config-data\") pod \"cinder-scheduler-0\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.630910 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vhr2w\" (UniqueName: \"kubernetes.io/projected/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-kube-api-access-vhr2w\") pod \"cinder-scheduler-0\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:38 crc 
kubenswrapper[4751]: I0227 16:47:38.638640 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-scripts\") pod \"cinder-scheduler-0\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.640360 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.648091 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.712529 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-d8ldz\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.712634 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-d8ldz\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.712737 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-config\") pod \"dnsmasq-dns-5c9776ccc5-d8ldz\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.712806 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-d8ldz\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.712844 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ct2mf\" (UniqueName: \"kubernetes.io/projected/b4cff4a9-9681-46d5-8c65-03812065b51e-kube-api-access-ct2mf\") pod \"dnsmasq-dns-5c9776ccc5-d8ldz\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.712875 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-d8ldz\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 
16:47:38.749628 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.751574 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.761175 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.773052 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.814756 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-config\") pod \"dnsmasq-dns-5c9776ccc5-d8ldz\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.814827 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-d8ldz\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.814862 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ct2mf\" (UniqueName: \"kubernetes.io/projected/b4cff4a9-9681-46d5-8c65-03812065b51e-kube-api-access-ct2mf\") pod \"dnsmasq-dns-5c9776ccc5-d8ldz\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.814893 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-d8ldz\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.814975 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-d8ldz\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.815033 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-d8ldz\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.821468 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.847169 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-d8ldz\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.849228 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-d8ldz\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.852036 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-config\") pod \"dnsmasq-dns-5c9776ccc5-d8ldz\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.860073 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-d8ldz\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.861935 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-d8ldz\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.880502 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ct2mf\" (UniqueName: \"kubernetes.io/projected/b4cff4a9-9681-46d5-8c65-03812065b51e-kube-api-access-ct2mf\") pod \"dnsmasq-dns-5c9776ccc5-d8ldz\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.910905 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.940639 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6bfa6d08-e3d0-426d-8320-ef5aed708e45-logs\") pod \"cinder-api-0\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " pod="openstack/cinder-api-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.940732 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-config-data-custom\") pod \"cinder-api-0\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " pod="openstack/cinder-api-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.940775 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-scripts\") pod \"cinder-api-0\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " pod="openstack/cinder-api-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.940836 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpckz\" (UniqueName: \"kubernetes.io/projected/6bfa6d08-e3d0-426d-8320-ef5aed708e45-kube-api-access-mpckz\") pod \"cinder-api-0\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " pod="openstack/cinder-api-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.940862 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6bfa6d08-e3d0-426d-8320-ef5aed708e45-etc-machine-id\") pod \"cinder-api-0\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " pod="openstack/cinder-api-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.940897 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " pod="openstack/cinder-api-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.940954 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-config-data\") pod \"cinder-api-0\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " pod="openstack/cinder-api-0" Feb 27 16:47:38 crc kubenswrapper[4751]: I0227 16:47:38.994702 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-58dd9ff6bc-nq6b4" podUID="e72931dc-c81e-4f44-8e6b-72fab4e429b4" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.146:5353: i/o timeout" Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.052270 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6bfa6d08-e3d0-426d-8320-ef5aed708e45-etc-machine-id\") pod \"cinder-api-0\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " pod="openstack/cinder-api-0" Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.052523 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " pod="openstack/cinder-api-0" Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.052582 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-config-data\") pod \"cinder-api-0\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " pod="openstack/cinder-api-0" Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.052604 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6bfa6d08-e3d0-426d-8320-ef5aed708e45-logs\") pod \"cinder-api-0\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " pod="openstack/cinder-api-0" Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.052649 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-config-data-custom\") pod \"cinder-api-0\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " pod="openstack/cinder-api-0" Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.052683 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-scripts\") pod \"cinder-api-0\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " pod="openstack/cinder-api-0" Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.052731 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mpckz\" (UniqueName: \"kubernetes.io/projected/6bfa6d08-e3d0-426d-8320-ef5aed708e45-kube-api-access-mpckz\") pod \"cinder-api-0\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " pod="openstack/cinder-api-0" Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.053092 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6bfa6d08-e3d0-426d-8320-ef5aed708e45-etc-machine-id\") pod \"cinder-api-0\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " pod="openstack/cinder-api-0" Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.053751 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6bfa6d08-e3d0-426d-8320-ef5aed708e45-logs\") pod \"cinder-api-0\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " pod="openstack/cinder-api-0" Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.067395 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-config-data-custom\") pod \"cinder-api-0\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " pod="openstack/cinder-api-0" Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.067782 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-scripts\") pod \"cinder-api-0\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " pod="openstack/cinder-api-0" Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.070080 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " pod="openstack/cinder-api-0" Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.096168 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-748c66fdb6-xsx5t"] Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.098141 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-config-data\") pod \"cinder-api-0\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " pod="openstack/cinder-api-0" Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.099898 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mpckz\" (UniqueName: \"kubernetes.io/projected/6bfa6d08-e3d0-426d-8320-ef5aed708e45-kube-api-access-mpckz\") pod \"cinder-api-0\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " pod="openstack/cinder-api-0" Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.234618 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cd8468ed-ddbc-411a-9d7c-931e4962aed7","Type":"ContainerStarted","Data":"e70c5a9c24c4c153e83571094fa186b2ff0ea4a949287b7b9727cb596b621d80"} Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.238973 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-748c66fdb6-xsx5t" event={"ID":"f9ca6eb2-820e-49ea-80ca-bd0e352d4243","Type":"ContainerStarted","Data":"834578fa0ad9ff02015b899a85b151bed3517c1c81a849a270c3f6aefecfe61d"} Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.249179 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" event={"ID":"a9f1619e-893b-4f17-b105-214ccbf6385e","Type":"ContainerStarted","Data":"cb50b4038b52526d094348a64713dcefe6038b082abf010e1ec5590d50b6e67e"} Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.250001 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" podUID="7419cb2d-20ce-4408-ad22-15e818562876" containerName="dnsmasq-dns" containerID="cri-o://1edc26da90f372a8ea45701f22be030ed5a0542cec1c925afb5fdb2a8dc5a1be" gracePeriod=10 Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.273827 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" podStartSLOduration=2.81229199 podStartE2EDuration="5.273810595s" podCreationTimestamp="2026-02-27 16:47:34 +0000 UTC" firstStartedPulling="2026-02-27 16:47:35.496819191 +0000 UTC m=+1417.643833638" lastFinishedPulling="2026-02-27 16:47:37.958337786 +0000 UTC m=+1420.105352243" observedRunningTime="2026-02-27 16:47:39.267546958 +0000 UTC m=+1421.414561405" watchObservedRunningTime="2026-02-27 16:47:39.273810595 +0000 UTC m=+1421.420825042" Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.381476 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.584244 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.653777 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-d8ldz"] Feb 27 16:47:39 crc kubenswrapper[4751]: W0227 16:47:39.656322 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4cff4a9_9681_46d5_8c65_03812065b51e.slice/crio-add9564ac1a48b896f39b2483f929486e5cf36622f460d98d00fe097ecea542b WatchSource:0}: Error finding container add9564ac1a48b896f39b2483f929486e5cf36622f460d98d00fe097ecea542b: Status 404 returned error can't find the container with id add9564ac1a48b896f39b2483f929486e5cf36622f460d98d00fe097ecea542b Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.790306 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.873750 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-config\") pod \"7419cb2d-20ce-4408-ad22-15e818562876\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.873836 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-dns-swift-storage-0\") pod \"7419cb2d-20ce-4408-ad22-15e818562876\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.873861 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-dns-svc\") pod \"7419cb2d-20ce-4408-ad22-15e818562876\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.873898 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-ovsdbserver-sb\") pod \"7419cb2d-20ce-4408-ad22-15e818562876\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.873995 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cgtnm\" (UniqueName: \"kubernetes.io/projected/7419cb2d-20ce-4408-ad22-15e818562876-kube-api-access-cgtnm\") pod \"7419cb2d-20ce-4408-ad22-15e818562876\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.874099 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-ovsdbserver-nb\") pod \"7419cb2d-20ce-4408-ad22-15e818562876\" (UID: \"7419cb2d-20ce-4408-ad22-15e818562876\") " Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.901727 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7419cb2d-20ce-4408-ad22-15e818562876-kube-api-access-cgtnm" (OuterVolumeSpecName: "kube-api-access-cgtnm") pod "7419cb2d-20ce-4408-ad22-15e818562876" (UID: 
"7419cb2d-20ce-4408-ad22-15e818562876"). InnerVolumeSpecName "kube-api-access-cgtnm". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.942191 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Feb 27 16:47:39 crc kubenswrapper[4751]: W0227 16:47:39.959813 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6bfa6d08_e3d0_426d_8320_ef5aed708e45.slice/crio-04bc7a069ef294b666e94f1a9f76e24845a5d8b1fa24cd087ff553c8b98025fb WatchSource:0}: Error finding container 04bc7a069ef294b666e94f1a9f76e24845a5d8b1fa24cd087ff553c8b98025fb: Status 404 returned error can't find the container with id 04bc7a069ef294b666e94f1a9f76e24845a5d8b1fa24cd087ff553c8b98025fb Feb 27 16:47:39 crc kubenswrapper[4751]: I0227 16:47:39.976321 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cgtnm\" (UniqueName: \"kubernetes.io/projected/7419cb2d-20ce-4408-ad22-15e818562876-kube-api-access-cgtnm\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.036239 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "7419cb2d-20ce-4408-ad22-15e818562876" (UID: "7419cb2d-20ce-4408-ad22-15e818562876"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.036305 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7419cb2d-20ce-4408-ad22-15e818562876" (UID: "7419cb2d-20ce-4408-ad22-15e818562876"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.036829 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-config" (OuterVolumeSpecName: "config") pod "7419cb2d-20ce-4408-ad22-15e818562876" (UID: "7419cb2d-20ce-4408-ad22-15e818562876"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.085019 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7419cb2d-20ce-4408-ad22-15e818562876" (UID: "7419cb2d-20ce-4408-ad22-15e818562876"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.086238 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.086267 4751 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.086279 4751 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.086287 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.090874 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7419cb2d-20ce-4408-ad22-15e818562876" (UID: "7419cb2d-20ce-4408-ad22-15e818562876"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.188328 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7419cb2d-20ce-4408-ad22-15e818562876-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.269173 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-748c66fdb6-xsx5t" event={"ID":"f9ca6eb2-820e-49ea-80ca-bd0e352d4243","Type":"ContainerStarted","Data":"9fd5df7074b1fc7b9bf2a447c5d88215370bd4201e24afb0b45b856f50e14328"} Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.269218 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-748c66fdb6-xsx5t" event={"ID":"f9ca6eb2-820e-49ea-80ca-bd0e352d4243","Type":"ContainerStarted","Data":"66ec49a151bde81e12512fb05eabd11d784e82af2fa19e9c977a0f218bb55c6d"} Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.270264 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.270317 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.273943 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" event={"ID":"b4cff4a9-9681-46d5-8c65-03812065b51e","Type":"ContainerStarted","Data":"556cf53f4179fcf44c1d423e90d620c00346bb7cc74ba954c7e5c1c90d2510fa"} Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.273987 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" event={"ID":"b4cff4a9-9681-46d5-8c65-03812065b51e","Type":"ContainerStarted","Data":"add9564ac1a48b896f39b2483f929486e5cf36622f460d98d00fe097ecea542b"} Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.280695 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/cinder-scheduler-0" event={"ID":"f87cfaf4-92e8-4dae-b6cd-cd311f536f46","Type":"ContainerStarted","Data":"7e3d04847a6ed0d6c357b78fd9451c624553d7ccbca628f456561b13c2b6ba29"} Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.282639 4751 generic.go:334] "Generic (PLEG): container finished" podID="7419cb2d-20ce-4408-ad22-15e818562876" containerID="1edc26da90f372a8ea45701f22be030ed5a0542cec1c925afb5fdb2a8dc5a1be" exitCode=0 Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.282679 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" event={"ID":"7419cb2d-20ce-4408-ad22-15e818562876","Type":"ContainerDied","Data":"1edc26da90f372a8ea45701f22be030ed5a0542cec1c925afb5fdb2a8dc5a1be"} Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.282696 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" event={"ID":"7419cb2d-20ce-4408-ad22-15e818562876","Type":"ContainerDied","Data":"bcd2ad518d40564a5de80f74340eff727b426612b4709142be18da0cd7c3b621"} Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.282712 4751 scope.go:117] "RemoveContainer" containerID="1edc26da90f372a8ea45701f22be030ed5a0542cec1c925afb5fdb2a8dc5a1be" Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.282822 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-5kmmx" Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.292715 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6bfa6d08-e3d0-426d-8320-ef5aed708e45","Type":"ContainerStarted","Data":"04bc7a069ef294b666e94f1a9f76e24845a5d8b1fa24cd087ff553c8b98025fb"} Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.298498 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cd8468ed-ddbc-411a-9d7c-931e4962aed7","Type":"ContainerStarted","Data":"017219752f262c04392a06c79b3e5b23984099b62d57a9ab1a8e9564ebfc5f9a"} Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.306883 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-748c66fdb6-xsx5t" podStartSLOduration=3.306864482 podStartE2EDuration="3.306864482s" podCreationTimestamp="2026-02-27 16:47:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:47:40.292062376 +0000 UTC m=+1422.439076823" watchObservedRunningTime="2026-02-27 16:47:40.306864482 +0000 UTC m=+1422.453878929" Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.330669 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-5kmmx"] Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.343928 4751 scope.go:117] "RemoveContainer" containerID="b68e71165a27e0158a3056c575d4e6024f19287ec939a8079851cfa42d2c79ed" Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.345549 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-5kmmx"] Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.478249 4751 scope.go:117] "RemoveContainer" containerID="1edc26da90f372a8ea45701f22be030ed5a0542cec1c925afb5fdb2a8dc5a1be" Feb 27 16:47:40 crc kubenswrapper[4751]: E0227 16:47:40.480893 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1edc26da90f372a8ea45701f22be030ed5a0542cec1c925afb5fdb2a8dc5a1be\": 
container with ID starting with 1edc26da90f372a8ea45701f22be030ed5a0542cec1c925afb5fdb2a8dc5a1be not found: ID does not exist" containerID="1edc26da90f372a8ea45701f22be030ed5a0542cec1c925afb5fdb2a8dc5a1be" Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.480950 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1edc26da90f372a8ea45701f22be030ed5a0542cec1c925afb5fdb2a8dc5a1be"} err="failed to get container status \"1edc26da90f372a8ea45701f22be030ed5a0542cec1c925afb5fdb2a8dc5a1be\": rpc error: code = NotFound desc = could not find container \"1edc26da90f372a8ea45701f22be030ed5a0542cec1c925afb5fdb2a8dc5a1be\": container with ID starting with 1edc26da90f372a8ea45701f22be030ed5a0542cec1c925afb5fdb2a8dc5a1be not found: ID does not exist" Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.480987 4751 scope.go:117] "RemoveContainer" containerID="b68e71165a27e0158a3056c575d4e6024f19287ec939a8079851cfa42d2c79ed" Feb 27 16:47:40 crc kubenswrapper[4751]: E0227 16:47:40.495151 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b68e71165a27e0158a3056c575d4e6024f19287ec939a8079851cfa42d2c79ed\": container with ID starting with b68e71165a27e0158a3056c575d4e6024f19287ec939a8079851cfa42d2c79ed not found: ID does not exist" containerID="b68e71165a27e0158a3056c575d4e6024f19287ec939a8079851cfa42d2c79ed" Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.495463 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b68e71165a27e0158a3056c575d4e6024f19287ec939a8079851cfa42d2c79ed"} err="failed to get container status \"b68e71165a27e0158a3056c575d4e6024f19287ec939a8079851cfa42d2c79ed\": rpc error: code = NotFound desc = could not find container \"b68e71165a27e0158a3056c575d4e6024f19287ec939a8079851cfa42d2c79ed\": container with ID starting with b68e71165a27e0158a3056c575d4e6024f19287ec939a8079851cfa42d2c79ed not found: ID does not exist" Feb 27 16:47:40 crc kubenswrapper[4751]: I0227 16:47:40.543925 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7419cb2d-20ce-4408-ad22-15e818562876" path="/var/lib/kubelet/pods/7419cb2d-20ce-4408-ad22-15e818562876/volumes" Feb 27 16:47:41 crc kubenswrapper[4751]: I0227 16:47:41.313154 4751 generic.go:334] "Generic (PLEG): container finished" podID="b4cff4a9-9681-46d5-8c65-03812065b51e" containerID="556cf53f4179fcf44c1d423e90d620c00346bb7cc74ba954c7e5c1c90d2510fa" exitCode=0 Feb 27 16:47:41 crc kubenswrapper[4751]: I0227 16:47:41.313455 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" event={"ID":"b4cff4a9-9681-46d5-8c65-03812065b51e","Type":"ContainerDied","Data":"556cf53f4179fcf44c1d423e90d620c00346bb7cc74ba954c7e5c1c90d2510fa"} Feb 27 16:47:41 crc kubenswrapper[4751]: I0227 16:47:41.363903 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6bfa6d08-e3d0-426d-8320-ef5aed708e45","Type":"ContainerStarted","Data":"c55df533ad44a5e3c2a9c98eff24a68f42314c9cb5babbde72166985309351e3"} Feb 27 16:47:41 crc kubenswrapper[4751]: I0227 16:47:41.367227 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Feb 27 16:47:41 crc kubenswrapper[4751]: I0227 16:47:41.368877 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"cd8468ed-ddbc-411a-9d7c-931e4962aed7","Type":"ContainerStarted","Data":"4c2ba9196fe61f9a742bb05acc42e633d0308530bb30967e5c81e901a8783cc1"} Feb 27 16:47:42 crc kubenswrapper[4751]: I0227 16:47:42.379829 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" event={"ID":"b4cff4a9-9681-46d5-8c65-03812065b51e","Type":"ContainerStarted","Data":"bec89c722ac587944d27ef87b766fd75fd312c86a28642bcd3e02f14f977d04d"} Feb 27 16:47:42 crc kubenswrapper[4751]: I0227 16:47:42.380529 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:47:42 crc kubenswrapper[4751]: I0227 16:47:42.381675 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f87cfaf4-92e8-4dae-b6cd-cd311f536f46","Type":"ContainerStarted","Data":"745b9390130389b7bbbd5eec7f7f90fe66fa049ea4405a3ecfd16510e769b6a9"} Feb 27 16:47:42 crc kubenswrapper[4751]: I0227 16:47:42.384012 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="6bfa6d08-e3d0-426d-8320-ef5aed708e45" containerName="cinder-api-log" containerID="cri-o://c55df533ad44a5e3c2a9c98eff24a68f42314c9cb5babbde72166985309351e3" gracePeriod=30 Feb 27 16:47:42 crc kubenswrapper[4751]: I0227 16:47:42.384079 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6bfa6d08-e3d0-426d-8320-ef5aed708e45","Type":"ContainerStarted","Data":"c629e4175fbd197b576f996f6510fcdef8e308c1fe93b0868b3d76362ccba5f5"} Feb 27 16:47:42 crc kubenswrapper[4751]: I0227 16:47:42.384108 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Feb 27 16:47:42 crc kubenswrapper[4751]: I0227 16:47:42.384133 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="6bfa6d08-e3d0-426d-8320-ef5aed708e45" containerName="cinder-api" containerID="cri-o://c629e4175fbd197b576f996f6510fcdef8e308c1fe93b0868b3d76362ccba5f5" gracePeriod=30 Feb 27 16:47:42 crc kubenswrapper[4751]: I0227 16:47:42.521055 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" podStartSLOduration=4.521037898 podStartE2EDuration="4.521037898s" podCreationTimestamp="2026-02-27 16:47:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:47:42.500376426 +0000 UTC m=+1424.647390863" watchObservedRunningTime="2026-02-27 16:47:42.521037898 +0000 UTC m=+1424.668052345" Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.403463 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f87cfaf4-92e8-4dae-b6cd-cd311f536f46","Type":"ContainerStarted","Data":"f58ef56f42fa1f7dc882411155fdd345360c70e9592b48a40f425a6e0cf8a569"} Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.405836 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6bfa6d08-e3d0-426d-8320-ef5aed708e45","Type":"ContainerDied","Data":"c629e4175fbd197b576f996f6510fcdef8e308c1fe93b0868b3d76362ccba5f5"} Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.405478 4751 generic.go:334] "Generic (PLEG): container finished" podID="6bfa6d08-e3d0-426d-8320-ef5aed708e45" containerID="c629e4175fbd197b576f996f6510fcdef8e308c1fe93b0868b3d76362ccba5f5" exitCode=0 Feb 27 16:47:43 crc 
kubenswrapper[4751]: I0227 16:47:43.405963 4751 generic.go:334] "Generic (PLEG): container finished" podID="6bfa6d08-e3d0-426d-8320-ef5aed708e45" containerID="c55df533ad44a5e3c2a9c98eff24a68f42314c9cb5babbde72166985309351e3" exitCode=143 Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.406035 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6bfa6d08-e3d0-426d-8320-ef5aed708e45","Type":"ContainerDied","Data":"c55df533ad44a5e3c2a9c98eff24a68f42314c9cb5babbde72166985309351e3"} Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.436197 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=5.436175574 podStartE2EDuration="5.436175574s" podCreationTimestamp="2026-02-27 16:47:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:47:42.518819359 +0000 UTC m=+1424.665833806" watchObservedRunningTime="2026-02-27 16:47:43.436175574 +0000 UTC m=+1425.583190021" Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.438778 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.42526519 podStartE2EDuration="5.438768934s" podCreationTimestamp="2026-02-27 16:47:38 +0000 UTC" firstStartedPulling="2026-02-27 16:47:39.676544944 +0000 UTC m=+1421.823559401" lastFinishedPulling="2026-02-27 16:47:40.690048698 +0000 UTC m=+1422.837063145" observedRunningTime="2026-02-27 16:47:43.432026134 +0000 UTC m=+1425.579040601" watchObservedRunningTime="2026-02-27 16:47:43.438768934 +0000 UTC m=+1425.585783381" Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.816216 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.820552 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6bfa6d08-e3d0-426d-8320-ef5aed708e45-logs\") pod \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.820613 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-scripts\") pod \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.820714 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-config-data-custom\") pod \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.820757 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mpckz\" (UniqueName: \"kubernetes.io/projected/6bfa6d08-e3d0-426d-8320-ef5aed708e45-kube-api-access-mpckz\") pod \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.820779 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6bfa6d08-e3d0-426d-8320-ef5aed708e45-etc-machine-id\") pod \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.820811 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-combined-ca-bundle\") pod \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.820838 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-config-data\") pod \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\" (UID: \"6bfa6d08-e3d0-426d-8320-ef5aed708e45\") " Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.822428 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6bfa6d08-e3d0-426d-8320-ef5aed708e45-logs" (OuterVolumeSpecName: "logs") pod "6bfa6d08-e3d0-426d-8320-ef5aed708e45" (UID: "6bfa6d08-e3d0-426d-8320-ef5aed708e45"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.823032 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6bfa6d08-e3d0-426d-8320-ef5aed708e45-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "6bfa6d08-e3d0-426d-8320-ef5aed708e45" (UID: "6bfa6d08-e3d0-426d-8320-ef5aed708e45"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.823925 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.826795 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "6bfa6d08-e3d0-426d-8320-ef5aed708e45" (UID: "6bfa6d08-e3d0-426d-8320-ef5aed708e45"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.827703 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-scripts" (OuterVolumeSpecName: "scripts") pod "6bfa6d08-e3d0-426d-8320-ef5aed708e45" (UID: "6bfa6d08-e3d0-426d-8320-ef5aed708e45"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.835370 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6bfa6d08-e3d0-426d-8320-ef5aed708e45-kube-api-access-mpckz" (OuterVolumeSpecName: "kube-api-access-mpckz") pod "6bfa6d08-e3d0-426d-8320-ef5aed708e45" (UID: "6bfa6d08-e3d0-426d-8320-ef5aed708e45"). InnerVolumeSpecName "kube-api-access-mpckz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.883624 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6bfa6d08-e3d0-426d-8320-ef5aed708e45" (UID: "6bfa6d08-e3d0-426d-8320-ef5aed708e45"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.900199 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-config-data" (OuterVolumeSpecName: "config-data") pod "6bfa6d08-e3d0-426d-8320-ef5aed708e45" (UID: "6bfa6d08-e3d0-426d-8320-ef5aed708e45"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.923931 4751 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.924052 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mpckz\" (UniqueName: \"kubernetes.io/projected/6bfa6d08-e3d0-426d-8320-ef5aed708e45-kube-api-access-mpckz\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.924080 4751 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6bfa6d08-e3d0-426d-8320-ef5aed708e45-etc-machine-id\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.924091 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.924101 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.924111 4751 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6bfa6d08-e3d0-426d-8320-ef5aed708e45-logs\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:43 crc kubenswrapper[4751]: I0227 16:47:43.924120 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6bfa6d08-e3d0-426d-8320-ef5aed708e45-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.417042 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cd8468ed-ddbc-411a-9d7c-931e4962aed7","Type":"ContainerStarted","Data":"729ce1054b27f73b6a024fc676fad5540516a8af8874ef070a3dca545c4854b9"} Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.417425 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.421295 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.421483 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6bfa6d08-e3d0-426d-8320-ef5aed708e45","Type":"ContainerDied","Data":"04bc7a069ef294b666e94f1a9f76e24845a5d8b1fa24cd087ff553c8b98025fb"} Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.421526 4751 scope.go:117] "RemoveContainer" containerID="c629e4175fbd197b576f996f6510fcdef8e308c1fe93b0868b3d76362ccba5f5" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.444671 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.334623857 podStartE2EDuration="8.444655034s" podCreationTimestamp="2026-02-27 16:47:36 +0000 UTC" firstStartedPulling="2026-02-27 16:47:37.203561313 +0000 UTC m=+1419.350575760" lastFinishedPulling="2026-02-27 16:47:43.31359249 +0000 UTC m=+1425.460606937" observedRunningTime="2026-02-27 16:47:44.442910998 +0000 UTC m=+1426.589925455" watchObservedRunningTime="2026-02-27 16:47:44.444655034 +0000 UTC m=+1426.591669481" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.448780 4751 scope.go:117] "RemoveContainer" containerID="c55df533ad44a5e3c2a9c98eff24a68f42314c9cb5babbde72166985309351e3" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.495069 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.512946 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.537386 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6bfa6d08-e3d0-426d-8320-ef5aed708e45" path="/var/lib/kubelet/pods/6bfa6d08-e3d0-426d-8320-ef5aed708e45/volumes" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.546893 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Feb 27 16:47:44 crc kubenswrapper[4751]: E0227 16:47:44.547281 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bfa6d08-e3d0-426d-8320-ef5aed708e45" containerName="cinder-api-log" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.547300 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bfa6d08-e3d0-426d-8320-ef5aed708e45" containerName="cinder-api-log" Feb 27 16:47:44 crc kubenswrapper[4751]: E0227 16:47:44.547311 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7419cb2d-20ce-4408-ad22-15e818562876" containerName="dnsmasq-dns" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.547318 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="7419cb2d-20ce-4408-ad22-15e818562876" containerName="dnsmasq-dns" Feb 27 16:47:44 crc kubenswrapper[4751]: E0227 16:47:44.547335 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7419cb2d-20ce-4408-ad22-15e818562876" containerName="init" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.547342 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="7419cb2d-20ce-4408-ad22-15e818562876" containerName="init" Feb 27 16:47:44 crc kubenswrapper[4751]: E0227 16:47:44.547352 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bfa6d08-e3d0-426d-8320-ef5aed708e45" containerName="cinder-api" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.547357 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bfa6d08-e3d0-426d-8320-ef5aed708e45" containerName="cinder-api" 
Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.547530 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="6bfa6d08-e3d0-426d-8320-ef5aed708e45" containerName="cinder-api-log" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.547551 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="7419cb2d-20ce-4408-ad22-15e818562876" containerName="dnsmasq-dns" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.547565 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="6bfa6d08-e3d0-426d-8320-ef5aed708e45" containerName="cinder-api" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.548879 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.551492 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.551837 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.552805 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.558435 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.642062 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.642114 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kphrv\" (UniqueName: \"kubernetes.io/projected/27f559b3-2c7d-4567-b836-702db66d74ae-kube-api-access-kphrv\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.642134 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/27f559b3-2c7d-4567-b836-702db66d74ae-etc-machine-id\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.642150 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-config-data\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.642353 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-config-data-custom\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.642395 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/27f559b3-2c7d-4567-b836-702db66d74ae-logs\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.642528 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.642700 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-public-tls-certs\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.642761 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-scripts\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.744436 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.744489 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kphrv\" (UniqueName: \"kubernetes.io/projected/27f559b3-2c7d-4567-b836-702db66d74ae-kube-api-access-kphrv\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.744507 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/27f559b3-2c7d-4567-b836-702db66d74ae-etc-machine-id\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.744522 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-config-data\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.744563 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-config-data-custom\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.744578 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/27f559b3-2c7d-4567-b836-702db66d74ae-logs\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.744612 4751 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.744662 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-public-tls-certs\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.744688 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-scripts\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.745006 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/27f559b3-2c7d-4567-b836-702db66d74ae-etc-machine-id\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.745446 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/27f559b3-2c7d-4567-b836-702db66d74ae-logs\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.749350 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-scripts\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.751209 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.751468 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-config-data\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.751985 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.752805 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-public-tls-certs\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.753321 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-config-data-custom\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.763846 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kphrv\" (UniqueName: \"kubernetes.io/projected/27f559b3-2c7d-4567-b836-702db66d74ae-kube-api-access-kphrv\") pod \"cinder-api-0\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " pod="openstack/cinder-api-0" Feb 27 16:47:44 crc kubenswrapper[4751]: I0227 16:47:44.871106 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Feb 27 16:47:45 crc kubenswrapper[4751]: I0227 16:47:45.387835 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Feb 27 16:47:45 crc kubenswrapper[4751]: W0227 16:47:45.390551 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27f559b3_2c7d_4567_b836_702db66d74ae.slice/crio-5497e8b0e8cdc95de37e0fce91a6708e2109da49f34acf9b3f21f6f147f5e74c WatchSource:0}: Error finding container 5497e8b0e8cdc95de37e0fce91a6708e2109da49f34acf9b3f21f6f147f5e74c: Status 404 returned error can't find the container with id 5497e8b0e8cdc95de37e0fce91a6708e2109da49f34acf9b3f21f6f147f5e74c Feb 27 16:47:45 crc kubenswrapper[4751]: I0227 16:47:45.431472 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"27f559b3-2c7d-4567-b836-702db66d74ae","Type":"ContainerStarted","Data":"5497e8b0e8cdc95de37e0fce91a6708e2109da49f34acf9b3f21f6f147f5e74c"} Feb 27 16:47:46 crc kubenswrapper[4751]: I0227 16:47:46.447110 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"27f559b3-2c7d-4567-b836-702db66d74ae","Type":"ContainerStarted","Data":"1605ce034c9a31b1ba7385475f66a6cf4c1eed04b5e224929bfc6e00a7735ec1"} Feb 27 16:47:46 crc kubenswrapper[4751]: I0227 16:47:46.466214 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7659f547c6-hgdwt" Feb 27 16:47:46 crc kubenswrapper[4751]: I0227 16:47:46.602790 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7659f547c6-hgdwt" Feb 27 16:47:47 crc kubenswrapper[4751]: I0227 16:47:47.459358 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"27f559b3-2c7d-4567-b836-702db66d74ae","Type":"ContainerStarted","Data":"fa81b9ef2d02d79bb411ca1a7c2d1c560865c6e665f8780f4d1d418fefd52da9"} Feb 27 16:47:47 crc kubenswrapper[4751]: I0227 16:47:47.481147 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.481131627 podStartE2EDuration="3.481131627s" podCreationTimestamp="2026-02-27 16:47:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:47:47.475565488 +0000 UTC m=+1429.622579935" watchObservedRunningTime="2026-02-27 16:47:47.481131627 +0000 UTC m=+1429.628146074" Feb 27 16:47:48 crc kubenswrapper[4751]: I0227 16:47:48.472353 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Feb 27 16:47:48 crc kubenswrapper[4751]: I0227 16:47:48.622145 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/neutron-688f5555d8-5fnpx" Feb 27 16:47:48 crc kubenswrapper[4751]: I0227 16:47:48.943204 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:47:49 crc kubenswrapper[4751]: I0227 16:47:49.020819 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-ssjx5"] Feb 27 16:47:49 crc kubenswrapper[4751]: I0227 16:47:49.021046 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" podUID="ea037294-8525-4c48-a867-d66f97c08253" containerName="dnsmasq-dns" containerID="cri-o://83d6a2faf6b2015d89f9922dc8ce2721c834106a2150043178c9ca758685d04d" gracePeriod=10 Feb 27 16:47:49 crc kubenswrapper[4751]: I0227 16:47:49.104577 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Feb 27 16:47:49 crc kubenswrapper[4751]: I0227 16:47:49.151382 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 27 16:47:49 crc kubenswrapper[4751]: I0227 16:47:49.510621 4751 generic.go:334] "Generic (PLEG): container finished" podID="ea037294-8525-4c48-a867-d66f97c08253" containerID="83d6a2faf6b2015d89f9922dc8ce2721c834106a2150043178c9ca758685d04d" exitCode=0 Feb 27 16:47:49 crc kubenswrapper[4751]: I0227 16:47:49.511215 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="f87cfaf4-92e8-4dae-b6cd-cd311f536f46" containerName="cinder-scheduler" containerID="cri-o://745b9390130389b7bbbd5eec7f7f90fe66fa049ea4405a3ecfd16510e769b6a9" gracePeriod=30 Feb 27 16:47:49 crc kubenswrapper[4751]: I0227 16:47:49.511650 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" event={"ID":"ea037294-8525-4c48-a867-d66f97c08253","Type":"ContainerDied","Data":"83d6a2faf6b2015d89f9922dc8ce2721c834106a2150043178c9ca758685d04d"} Feb 27 16:47:49 crc kubenswrapper[4751]: I0227 16:47:49.512049 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="f87cfaf4-92e8-4dae-b6cd-cd311f536f46" containerName="probe" containerID="cri-o://f58ef56f42fa1f7dc882411155fdd345360c70e9592b48a40f425a6e0cf8a569" gracePeriod=30 Feb 27 16:47:49 crc kubenswrapper[4751]: I0227 16:47:49.573082 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-78bc7f9d5b-gzgqp" Feb 27 16:47:49 crc kubenswrapper[4751]: I0227 16:47:49.590352 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-78bc7f9d5b-gzgqp" Feb 27 16:47:49 crc kubenswrapper[4751]: I0227 16:47:49.724943 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:49 crc kubenswrapper[4751]: I0227 16:47:49.850941 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-ovsdbserver-nb\") pod \"ea037294-8525-4c48-a867-d66f97c08253\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " Feb 27 16:47:49 crc kubenswrapper[4751]: I0227 16:47:49.851039 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-config\") pod \"ea037294-8525-4c48-a867-d66f97c08253\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " Feb 27 16:47:49 crc kubenswrapper[4751]: I0227 16:47:49.851156 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r2wlh\" (UniqueName: \"kubernetes.io/projected/ea037294-8525-4c48-a867-d66f97c08253-kube-api-access-r2wlh\") pod \"ea037294-8525-4c48-a867-d66f97c08253\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " Feb 27 16:47:49 crc kubenswrapper[4751]: I0227 16:47:49.851204 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-dns-svc\") pod \"ea037294-8525-4c48-a867-d66f97c08253\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " Feb 27 16:47:49 crc kubenswrapper[4751]: I0227 16:47:49.851236 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-dns-swift-storage-0\") pod \"ea037294-8525-4c48-a867-d66f97c08253\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " Feb 27 16:47:49 crc kubenswrapper[4751]: I0227 16:47:49.851320 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-ovsdbserver-sb\") pod \"ea037294-8525-4c48-a867-d66f97c08253\" (UID: \"ea037294-8525-4c48-a867-d66f97c08253\") " Feb 27 16:47:49 crc kubenswrapper[4751]: I0227 16:47:49.866710 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea037294-8525-4c48-a867-d66f97c08253-kube-api-access-r2wlh" (OuterVolumeSpecName: "kube-api-access-r2wlh") pod "ea037294-8525-4c48-a867-d66f97c08253" (UID: "ea037294-8525-4c48-a867-d66f97c08253"). InnerVolumeSpecName "kube-api-access-r2wlh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:47:49 crc kubenswrapper[4751]: I0227 16:47:49.959231 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r2wlh\" (UniqueName: \"kubernetes.io/projected/ea037294-8525-4c48-a867-d66f97c08253-kube-api-access-r2wlh\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:49 crc kubenswrapper[4751]: I0227 16:47:49.986199 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ea037294-8525-4c48-a867-d66f97c08253" (UID: "ea037294-8525-4c48-a867-d66f97c08253"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:50 crc kubenswrapper[4751]: I0227 16:47:50.002947 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ea037294-8525-4c48-a867-d66f97c08253" (UID: "ea037294-8525-4c48-a867-d66f97c08253"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:50 crc kubenswrapper[4751]: I0227 16:47:50.003355 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ea037294-8525-4c48-a867-d66f97c08253" (UID: "ea037294-8525-4c48-a867-d66f97c08253"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:50 crc kubenswrapper[4751]: I0227 16:47:50.014902 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-config" (OuterVolumeSpecName: "config") pod "ea037294-8525-4c48-a867-d66f97c08253" (UID: "ea037294-8525-4c48-a867-d66f97c08253"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:50 crc kubenswrapper[4751]: I0227 16:47:50.061631 4751 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:50 crc kubenswrapper[4751]: I0227 16:47:50.061897 4751 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:50 crc kubenswrapper[4751]: I0227 16:47:50.061985 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:50 crc kubenswrapper[4751]: I0227 16:47:50.062053 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:50 crc kubenswrapper[4751]: I0227 16:47:50.105065 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ea037294-8525-4c48-a867-d66f97c08253" (UID: "ea037294-8525-4c48-a867-d66f97c08253"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:50 crc kubenswrapper[4751]: I0227 16:47:50.164618 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea037294-8525-4c48-a867-d66f97c08253-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:50 crc kubenswrapper[4751]: I0227 16:47:50.529651 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" Feb 27 16:47:50 crc kubenswrapper[4751]: I0227 16:47:50.561919 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-ssjx5" event={"ID":"ea037294-8525-4c48-a867-d66f97c08253","Type":"ContainerDied","Data":"10f4bcbaeaaaed13eba99b0198a49a3028c37d8640275666acfac2c3ac0c5a94"} Feb 27 16:47:50 crc kubenswrapper[4751]: I0227 16:47:50.562091 4751 scope.go:117] "RemoveContainer" containerID="83d6a2faf6b2015d89f9922dc8ce2721c834106a2150043178c9ca758685d04d" Feb 27 16:47:50 crc kubenswrapper[4751]: I0227 16:47:50.612307 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-ssjx5"] Feb 27 16:47:50 crc kubenswrapper[4751]: I0227 16:47:50.618554 4751 scope.go:117] "RemoveContainer" containerID="656de3f15bfaf3e3acb2eb6f47617a535be06f8850c915799511fbd0adcb13d9" Feb 27 16:47:50 crc kubenswrapper[4751]: I0227 16:47:50.631743 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-ssjx5"] Feb 27 16:47:50 crc kubenswrapper[4751]: I0227 16:47:50.862963 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:51 crc kubenswrapper[4751]: I0227 16:47:51.009885 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:51 crc kubenswrapper[4751]: I0227 16:47:51.018349 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:51 crc kubenswrapper[4751]: I0227 16:47:51.123999 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:51 crc kubenswrapper[4751]: I0227 16:47:51.267431 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:47:51 crc kubenswrapper[4751]: I0227 16:47:51.272001 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:47:51 crc kubenswrapper[4751]: I0227 16:47:51.371049 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-7659f547c6-hgdwt"] Feb 27 16:47:51 crc kubenswrapper[4751]: I0227 16:47:51.383085 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-7659f547c6-hgdwt" podUID="1e512657-d0e1-4289-b430-0fc78d20aca7" containerName="barbican-api-log" containerID="cri-o://cd27bf82eb04cb3fddf318ca5f710fddb5fc8873e483c05ec4bacbc336c6ba0a" gracePeriod=30 Feb 27 16:47:51 crc kubenswrapper[4751]: I0227 16:47:51.383754 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-7659f547c6-hgdwt" podUID="1e512657-d0e1-4289-b430-0fc78d20aca7" containerName="barbican-api" containerID="cri-o://e185e017aabd6eea6f75a03f5e4ac18ff4ed67ec022a4811b85b1795a1c5cb13" gracePeriod=30 Feb 27 16:47:51 crc kubenswrapper[4751]: I0227 16:47:51.385917 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-c7b6db6d-k4vfr"] Feb 27 16:47:51 crc kubenswrapper[4751]: I0227 16:47:51.555703 4751 generic.go:334] "Generic (PLEG): container finished" podID="1e512657-d0e1-4289-b430-0fc78d20aca7" containerID="cd27bf82eb04cb3fddf318ca5f710fddb5fc8873e483c05ec4bacbc336c6ba0a" exitCode=143 Feb 27 16:47:51 crc kubenswrapper[4751]: I0227 16:47:51.555784 4751 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/barbican-api-7659f547c6-hgdwt" event={"ID":"1e512657-d0e1-4289-b430-0fc78d20aca7","Type":"ContainerDied","Data":"cd27bf82eb04cb3fddf318ca5f710fddb5fc8873e483c05ec4bacbc336c6ba0a"} Feb 27 16:47:51 crc kubenswrapper[4751]: I0227 16:47:51.561714 4751 generic.go:334] "Generic (PLEG): container finished" podID="f87cfaf4-92e8-4dae-b6cd-cd311f536f46" containerID="f58ef56f42fa1f7dc882411155fdd345360c70e9592b48a40f425a6e0cf8a569" exitCode=0 Feb 27 16:47:51 crc kubenswrapper[4751]: I0227 16:47:51.561807 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f87cfaf4-92e8-4dae-b6cd-cd311f536f46","Type":"ContainerDied","Data":"f58ef56f42fa1f7dc882411155fdd345360c70e9592b48a40f425a6e0cf8a569"} Feb 27 16:47:51 crc kubenswrapper[4751]: I0227 16:47:51.682727 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:51 crc kubenswrapper[4751]: I0227 16:47:51.706258 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:47:51 crc kubenswrapper[4751]: I0227 16:47:51.772278 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-78bc7f9d5b-gzgqp"] Feb 27 16:47:51 crc kubenswrapper[4751]: I0227 16:47:51.772513 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-78bc7f9d5b-gzgqp" podUID="1b23ce3a-f761-42a6-a08c-34f81bd2a8fe" containerName="neutron-api" containerID="cri-o://76b78dde5323d728cf0176c3ec46089efcdee6853d62d69a7eea78cb609893bd" gracePeriod=30 Feb 27 16:47:51 crc kubenswrapper[4751]: I0227 16:47:51.772643 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-78bc7f9d5b-gzgqp" podUID="1b23ce3a-f761-42a6-a08c-34f81bd2a8fe" containerName="neutron-httpd" containerID="cri-o://627b5af683b614bccc6f8a8ba3d9f8d587a67e596d3eda586ddd8cd0c89ca727" gracePeriod=30 Feb 27 16:47:51 crc kubenswrapper[4751]: I0227 16:47:51.959566 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:47:52 crc kubenswrapper[4751]: I0227 16:47:52.534047 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea037294-8525-4c48-a867-d66f97c08253" path="/var/lib/kubelet/pods/ea037294-8525-4c48-a867-d66f97c08253/volumes" Feb 27 16:47:52 crc kubenswrapper[4751]: I0227 16:47:52.589003 4751 generic.go:334] "Generic (PLEG): container finished" podID="1b23ce3a-f761-42a6-a08c-34f81bd2a8fe" containerID="627b5af683b614bccc6f8a8ba3d9f8d587a67e596d3eda586ddd8cd0c89ca727" exitCode=0 Feb 27 16:47:52 crc kubenswrapper[4751]: I0227 16:47:52.589082 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-78bc7f9d5b-gzgqp" event={"ID":"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe","Type":"ContainerDied","Data":"627b5af683b614bccc6f8a8ba3d9f8d587a67e596d3eda586ddd8cd0c89ca727"} Feb 27 16:47:52 crc kubenswrapper[4751]: I0227 16:47:52.589517 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-c7b6db6d-k4vfr" podUID="9b497134-1c13-450d-830a-0e0e7d51fe9d" containerName="placement-log" containerID="cri-o://ac69e5335b42e47a46aedf872ab39c51b4ad64fca410faa17d57c61e9ea2ae1e" gracePeriod=30 Feb 27 16:47:52 crc kubenswrapper[4751]: I0227 16:47:52.589540 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-c7b6db6d-k4vfr" 
podUID="9b497134-1c13-450d-830a-0e0e7d51fe9d" containerName="placement-api" containerID="cri-o://d19a09bb3b1f70cc20a18d01058d0ff753c152ba1f6571f06895c1cb72184b1d" gracePeriod=30 Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.523758 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.604069 4751 generic.go:334] "Generic (PLEG): container finished" podID="9b497134-1c13-450d-830a-0e0e7d51fe9d" containerID="ac69e5335b42e47a46aedf872ab39c51b4ad64fca410faa17d57c61e9ea2ae1e" exitCode=143 Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.604130 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-c7b6db6d-k4vfr" event={"ID":"9b497134-1c13-450d-830a-0e0e7d51fe9d","Type":"ContainerDied","Data":"ac69e5335b42e47a46aedf872ab39c51b4ad64fca410faa17d57c61e9ea2ae1e"} Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.605589 4751 generic.go:334] "Generic (PLEG): container finished" podID="f87cfaf4-92e8-4dae-b6cd-cd311f536f46" containerID="745b9390130389b7bbbd5eec7f7f90fe66fa049ea4405a3ecfd16510e769b6a9" exitCode=0 Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.605617 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f87cfaf4-92e8-4dae-b6cd-cd311f536f46","Type":"ContainerDied","Data":"745b9390130389b7bbbd5eec7f7f90fe66fa049ea4405a3ecfd16510e769b6a9"} Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.605632 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f87cfaf4-92e8-4dae-b6cd-cd311f536f46","Type":"ContainerDied","Data":"7e3d04847a6ed0d6c357b78fd9451c624553d7ccbca628f456561b13c2b6ba29"} Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.605649 4751 scope.go:117] "RemoveContainer" containerID="f58ef56f42fa1f7dc882411155fdd345360c70e9592b48a40f425a6e0cf8a569" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.605788 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.635742 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-etc-machine-id\") pod \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.635820 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-scripts\") pod \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.635872 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-config-data\") pod \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.636533 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-config-data-custom\") pod \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.636611 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vhr2w\" (UniqueName: \"kubernetes.io/projected/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-kube-api-access-vhr2w\") pod \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.636706 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-combined-ca-bundle\") pod \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\" (UID: \"f87cfaf4-92e8-4dae-b6cd-cd311f536f46\") " Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.637799 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "f87cfaf4-92e8-4dae-b6cd-cd311f536f46" (UID: "f87cfaf4-92e8-4dae-b6cd-cd311f536f46"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.647742 4751 scope.go:117] "RemoveContainer" containerID="745b9390130389b7bbbd5eec7f7f90fe66fa049ea4405a3ecfd16510e769b6a9" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.659338 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-scripts" (OuterVolumeSpecName: "scripts") pod "f87cfaf4-92e8-4dae-b6cd-cd311f536f46" (UID: "f87cfaf4-92e8-4dae-b6cd-cd311f536f46"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.660616 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f87cfaf4-92e8-4dae-b6cd-cd311f536f46" (UID: "f87cfaf4-92e8-4dae-b6cd-cd311f536f46"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.661570 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-kube-api-access-vhr2w" (OuterVolumeSpecName: "kube-api-access-vhr2w") pod "f87cfaf4-92e8-4dae-b6cd-cd311f536f46" (UID: "f87cfaf4-92e8-4dae-b6cd-cd311f536f46"). InnerVolumeSpecName "kube-api-access-vhr2w". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.717722 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f87cfaf4-92e8-4dae-b6cd-cd311f536f46" (UID: "f87cfaf4-92e8-4dae-b6cd-cd311f536f46"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.738844 4751 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.738873 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vhr2w\" (UniqueName: \"kubernetes.io/projected/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-kube-api-access-vhr2w\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.738884 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.738892 4751 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-etc-machine-id\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.738901 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.741215 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-config-data" (OuterVolumeSpecName: "config-data") pod "f87cfaf4-92e8-4dae-b6cd-cd311f536f46" (UID: "f87cfaf4-92e8-4dae-b6cd-cd311f536f46"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.829170 4751 scope.go:117] "RemoveContainer" containerID="f58ef56f42fa1f7dc882411155fdd345360c70e9592b48a40f425a6e0cf8a569" Feb 27 16:47:53 crc kubenswrapper[4751]: E0227 16:47:53.829966 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f58ef56f42fa1f7dc882411155fdd345360c70e9592b48a40f425a6e0cf8a569\": container with ID starting with f58ef56f42fa1f7dc882411155fdd345360c70e9592b48a40f425a6e0cf8a569 not found: ID does not exist" containerID="f58ef56f42fa1f7dc882411155fdd345360c70e9592b48a40f425a6e0cf8a569" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.830004 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f58ef56f42fa1f7dc882411155fdd345360c70e9592b48a40f425a6e0cf8a569"} err="failed to get container status \"f58ef56f42fa1f7dc882411155fdd345360c70e9592b48a40f425a6e0cf8a569\": rpc error: code = NotFound desc = could not find container \"f58ef56f42fa1f7dc882411155fdd345360c70e9592b48a40f425a6e0cf8a569\": container with ID starting with f58ef56f42fa1f7dc882411155fdd345360c70e9592b48a40f425a6e0cf8a569 not found: ID does not exist" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.830032 4751 scope.go:117] "RemoveContainer" containerID="745b9390130389b7bbbd5eec7f7f90fe66fa049ea4405a3ecfd16510e769b6a9" Feb 27 16:47:53 crc kubenswrapper[4751]: E0227 16:47:53.830324 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"745b9390130389b7bbbd5eec7f7f90fe66fa049ea4405a3ecfd16510e769b6a9\": container with ID starting with 745b9390130389b7bbbd5eec7f7f90fe66fa049ea4405a3ecfd16510e769b6a9 not found: ID does not exist" containerID="745b9390130389b7bbbd5eec7f7f90fe66fa049ea4405a3ecfd16510e769b6a9" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.830418 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"745b9390130389b7bbbd5eec7f7f90fe66fa049ea4405a3ecfd16510e769b6a9"} err="failed to get container status \"745b9390130389b7bbbd5eec7f7f90fe66fa049ea4405a3ecfd16510e769b6a9\": rpc error: code = NotFound desc = could not find container \"745b9390130389b7bbbd5eec7f7f90fe66fa049ea4405a3ecfd16510e769b6a9\": container with ID starting with 745b9390130389b7bbbd5eec7f7f90fe66fa049ea4405a3ecfd16510e769b6a9 not found: ID does not exist" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.840833 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f87cfaf4-92e8-4dae-b6cd-cd311f536f46-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.936642 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.947317 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.957457 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Feb 27 16:47:53 crc kubenswrapper[4751]: E0227 16:47:53.958040 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f87cfaf4-92e8-4dae-b6cd-cd311f536f46" containerName="probe" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.958119 4751 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="f87cfaf4-92e8-4dae-b6cd-cd311f536f46" containerName="probe" Feb 27 16:47:53 crc kubenswrapper[4751]: E0227 16:47:53.958192 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea037294-8525-4c48-a867-d66f97c08253" containerName="dnsmasq-dns" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.958241 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea037294-8525-4c48-a867-d66f97c08253" containerName="dnsmasq-dns" Feb 27 16:47:53 crc kubenswrapper[4751]: E0227 16:47:53.958301 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea037294-8525-4c48-a867-d66f97c08253" containerName="init" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.958347 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea037294-8525-4c48-a867-d66f97c08253" containerName="init" Feb 27 16:47:53 crc kubenswrapper[4751]: E0227 16:47:53.959629 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f87cfaf4-92e8-4dae-b6cd-cd311f536f46" containerName="cinder-scheduler" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.959717 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="f87cfaf4-92e8-4dae-b6cd-cd311f536f46" containerName="cinder-scheduler" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.960011 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea037294-8525-4c48-a867-d66f97c08253" containerName="dnsmasq-dns" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.960099 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="f87cfaf4-92e8-4dae-b6cd-cd311f536f46" containerName="probe" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.960161 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="f87cfaf4-92e8-4dae-b6cd-cd311f536f46" containerName="cinder-scheduler" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.961133 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 27 16:47:53 crc kubenswrapper[4751]: I0227 16:47:53.963116 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.019061 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.043614 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.043827 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-scripts\") pod \"cinder-scheduler-0\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.043976 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.044099 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-config-data\") pod \"cinder-scheduler-0\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.044341 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/23d98e0b-8d21-4ad9-b3a4-716c1d221949-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.044441 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjzj5\" (UniqueName: \"kubernetes.io/projected/23d98e0b-8d21-4ad9-b3a4-716c1d221949-kube-api-access-cjzj5\") pod \"cinder-scheduler-0\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.145777 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/23d98e0b-8d21-4ad9-b3a4-716c1d221949-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.145841 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjzj5\" (UniqueName: \"kubernetes.io/projected/23d98e0b-8d21-4ad9-b3a4-716c1d221949-kube-api-access-cjzj5\") pod \"cinder-scheduler-0\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.145875 4751 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.145891 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-scripts\") pod \"cinder-scheduler-0\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.145889 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/23d98e0b-8d21-4ad9-b3a4-716c1d221949-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.145923 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.145957 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-config-data\") pod \"cinder-scheduler-0\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.150862 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-scripts\") pod \"cinder-scheduler-0\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.150944 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.151205 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-config-data\") pod \"cinder-scheduler-0\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.160938 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " pod="openstack/cinder-scheduler-0" Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.168019 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjzj5\" (UniqueName: \"kubernetes.io/projected/23d98e0b-8d21-4ad9-b3a4-716c1d221949-kube-api-access-cjzj5\") pod \"cinder-scheduler-0\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " 
pod="openstack/cinder-scheduler-0" Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.276530 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.543065 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f87cfaf4-92e8-4dae-b6cd-cd311f536f46" path="/var/lib/kubelet/pods/f87cfaf4-92e8-4dae-b6cd-cd311f536f46/volumes" Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.616047 4751 generic.go:334] "Generic (PLEG): container finished" podID="1e512657-d0e1-4289-b430-0fc78d20aca7" containerID="e185e017aabd6eea6f75a03f5e4ac18ff4ed67ec022a4811b85b1795a1c5cb13" exitCode=0 Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.616139 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7659f547c6-hgdwt" event={"ID":"1e512657-d0e1-4289-b430-0fc78d20aca7","Type":"ContainerDied","Data":"e185e017aabd6eea6f75a03f5e4ac18ff4ed67ec022a4811b85b1795a1c5cb13"} Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.739165 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 27 16:47:54 crc kubenswrapper[4751]: I0227 16:47:54.912551 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7659f547c6-hgdwt" Feb 27 16:47:55 crc kubenswrapper[4751]: I0227 16:47:55.069259 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e512657-d0e1-4289-b430-0fc78d20aca7-config-data\") pod \"1e512657-d0e1-4289-b430-0fc78d20aca7\" (UID: \"1e512657-d0e1-4289-b430-0fc78d20aca7\") " Feb 27 16:47:55 crc kubenswrapper[4751]: I0227 16:47:55.069329 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e512657-d0e1-4289-b430-0fc78d20aca7-logs\") pod \"1e512657-d0e1-4289-b430-0fc78d20aca7\" (UID: \"1e512657-d0e1-4289-b430-0fc78d20aca7\") " Feb 27 16:47:55 crc kubenswrapper[4751]: I0227 16:47:55.069450 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6tr2j\" (UniqueName: \"kubernetes.io/projected/1e512657-d0e1-4289-b430-0fc78d20aca7-kube-api-access-6tr2j\") pod \"1e512657-d0e1-4289-b430-0fc78d20aca7\" (UID: \"1e512657-d0e1-4289-b430-0fc78d20aca7\") " Feb 27 16:47:55 crc kubenswrapper[4751]: I0227 16:47:55.069493 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e512657-d0e1-4289-b430-0fc78d20aca7-combined-ca-bundle\") pod \"1e512657-d0e1-4289-b430-0fc78d20aca7\" (UID: \"1e512657-d0e1-4289-b430-0fc78d20aca7\") " Feb 27 16:47:55 crc kubenswrapper[4751]: I0227 16:47:55.069543 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1e512657-d0e1-4289-b430-0fc78d20aca7-config-data-custom\") pod \"1e512657-d0e1-4289-b430-0fc78d20aca7\" (UID: \"1e512657-d0e1-4289-b430-0fc78d20aca7\") " Feb 27 16:47:55 crc kubenswrapper[4751]: I0227 16:47:55.070177 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e512657-d0e1-4289-b430-0fc78d20aca7-logs" (OuterVolumeSpecName: "logs") pod "1e512657-d0e1-4289-b430-0fc78d20aca7" (UID: "1e512657-d0e1-4289-b430-0fc78d20aca7"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:47:55 crc kubenswrapper[4751]: I0227 16:47:55.074549 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e512657-d0e1-4289-b430-0fc78d20aca7-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "1e512657-d0e1-4289-b430-0fc78d20aca7" (UID: "1e512657-d0e1-4289-b430-0fc78d20aca7"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:55 crc kubenswrapper[4751]: I0227 16:47:55.083581 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e512657-d0e1-4289-b430-0fc78d20aca7-kube-api-access-6tr2j" (OuterVolumeSpecName: "kube-api-access-6tr2j") pod "1e512657-d0e1-4289-b430-0fc78d20aca7" (UID: "1e512657-d0e1-4289-b430-0fc78d20aca7"). InnerVolumeSpecName "kube-api-access-6tr2j". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:47:55 crc kubenswrapper[4751]: I0227 16:47:55.099550 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e512657-d0e1-4289-b430-0fc78d20aca7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1e512657-d0e1-4289-b430-0fc78d20aca7" (UID: "1e512657-d0e1-4289-b430-0fc78d20aca7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:55 crc kubenswrapper[4751]: I0227 16:47:55.120863 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e512657-d0e1-4289-b430-0fc78d20aca7-config-data" (OuterVolumeSpecName: "config-data") pod "1e512657-d0e1-4289-b430-0fc78d20aca7" (UID: "1e512657-d0e1-4289-b430-0fc78d20aca7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:55 crc kubenswrapper[4751]: I0227 16:47:55.171667 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e512657-d0e1-4289-b430-0fc78d20aca7-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:55 crc kubenswrapper[4751]: I0227 16:47:55.171698 4751 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e512657-d0e1-4289-b430-0fc78d20aca7-logs\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:55 crc kubenswrapper[4751]: I0227 16:47:55.171708 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6tr2j\" (UniqueName: \"kubernetes.io/projected/1e512657-d0e1-4289-b430-0fc78d20aca7-kube-api-access-6tr2j\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:55 crc kubenswrapper[4751]: I0227 16:47:55.171717 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e512657-d0e1-4289-b430-0fc78d20aca7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:55 crc kubenswrapper[4751]: I0227 16:47:55.171725 4751 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1e512657-d0e1-4289-b430-0fc78d20aca7-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:55 crc kubenswrapper[4751]: I0227 16:47:55.628897 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"23d98e0b-8d21-4ad9-b3a4-716c1d221949","Type":"ContainerStarted","Data":"975963e810405e7a1f164ed08541517bb44532e23c9b968a1511ad894a22d948"} Feb 27 16:47:55 crc kubenswrapper[4751]: I0227 16:47:55.629307 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"23d98e0b-8d21-4ad9-b3a4-716c1d221949","Type":"ContainerStarted","Data":"86da512d71e64953a8fbea39e3f4571119737e50e0bfe9c059be0c0424c5d93c"} Feb 27 16:47:55 crc kubenswrapper[4751]: I0227 16:47:55.633737 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7659f547c6-hgdwt" event={"ID":"1e512657-d0e1-4289-b430-0fc78d20aca7","Type":"ContainerDied","Data":"e4b919c61a8f254548b3a9b67273690bde47ef59a01a22ac35627a04a8753f6d"} Feb 27 16:47:55 crc kubenswrapper[4751]: I0227 16:47:55.633797 4751 scope.go:117] "RemoveContainer" containerID="e185e017aabd6eea6f75a03f5e4ac18ff4ed67ec022a4811b85b1795a1c5cb13" Feb 27 16:47:55 crc kubenswrapper[4751]: I0227 16:47:55.633805 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7659f547c6-hgdwt" Feb 27 16:47:55 crc kubenswrapper[4751]: I0227 16:47:55.658880 4751 scope.go:117] "RemoveContainer" containerID="cd27bf82eb04cb3fddf318ca5f710fddb5fc8873e483c05ec4bacbc336c6ba0a" Feb 27 16:47:55 crc kubenswrapper[4751]: I0227 16:47:55.727246 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-7659f547c6-hgdwt"] Feb 27 16:47:55 crc kubenswrapper[4751]: I0227 16:47:55.740154 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-7659f547c6-hgdwt"] Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.506618 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-ccb964dc9-cj74q"] Feb 27 16:47:56 crc kubenswrapper[4751]: E0227 16:47:56.507241 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e512657-d0e1-4289-b430-0fc78d20aca7" containerName="barbican-api-log" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.507252 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e512657-d0e1-4289-b430-0fc78d20aca7" containerName="barbican-api-log" Feb 27 16:47:56 crc kubenswrapper[4751]: E0227 16:47:56.507274 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e512657-d0e1-4289-b430-0fc78d20aca7" containerName="barbican-api" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.507281 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e512657-d0e1-4289-b430-0fc78d20aca7" containerName="barbican-api" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.507467 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e512657-d0e1-4289-b430-0fc78d20aca7" containerName="barbican-api-log" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.507489 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e512657-d0e1-4289-b430-0fc78d20aca7" containerName="barbican-api" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.508368 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.532254 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.534935 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.535591 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.556376 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e512657-d0e1-4289-b430-0fc78d20aca7" path="/var/lib/kubelet/pods/1e512657-d0e1-4289-b430-0fc78d20aca7/volumes" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.556988 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-ccb964dc9-cj74q"] Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.557726 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.595730 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ef465c53-5add-41ff-9fcc-00e714bc2bc0-run-httpd\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.596054 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ef465c53-5add-41ff-9fcc-00e714bc2bc0-log-httpd\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.596121 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-combined-ca-bundle\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.596154 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-config-data\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.596177 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6xwg\" (UniqueName: \"kubernetes.io/projected/ef465c53-5add-41ff-9fcc-00e714bc2bc0-kube-api-access-w6xwg\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.596227 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ef465c53-5add-41ff-9fcc-00e714bc2bc0-etc-swift\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.596263 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-public-tls-certs\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.596359 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-internal-tls-certs\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.667955 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.668420 4751 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openstack/ceilometer-0" podUID="cd8468ed-ddbc-411a-9d7c-931e4962aed7" containerName="ceilometer-central-agent" containerID="cri-o://e70c5a9c24c4c153e83571094fa186b2ff0ea4a949287b7b9727cb596b621d80" gracePeriod=30 Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.669070 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cd8468ed-ddbc-411a-9d7c-931e4962aed7" containerName="proxy-httpd" containerID="cri-o://729ce1054b27f73b6a024fc676fad5540516a8af8874ef070a3dca545c4854b9" gracePeriod=30 Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.669127 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cd8468ed-ddbc-411a-9d7c-931e4962aed7" containerName="sg-core" containerID="cri-o://4c2ba9196fe61f9a742bb05acc42e633d0308530bb30967e5c81e901a8783cc1" gracePeriod=30 Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.669156 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cd8468ed-ddbc-411a-9d7c-931e4962aed7" containerName="ceilometer-notification-agent" containerID="cri-o://017219752f262c04392a06c79b3e5b23984099b62d57a9ab1a8e9564ebfc5f9a" gracePeriod=30 Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.695129 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.700721 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s2sbf\" (UniqueName: \"kubernetes.io/projected/9b497134-1c13-450d-830a-0e0e7d51fe9d-kube-api-access-s2sbf\") pod \"9b497134-1c13-450d-830a-0e0e7d51fe9d\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.700797 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b497134-1c13-450d-830a-0e0e7d51fe9d-logs\") pod \"9b497134-1c13-450d-830a-0e0e7d51fe9d\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.700857 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-internal-tls-certs\") pod \"9b497134-1c13-450d-830a-0e0e7d51fe9d\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.700887 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-public-tls-certs\") pod \"9b497134-1c13-450d-830a-0e0e7d51fe9d\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.700928 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-scripts\") pod \"9b497134-1c13-450d-830a-0e0e7d51fe9d\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.701010 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-config-data\") pod \"9b497134-1c13-450d-830a-0e0e7d51fe9d\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") 
" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.701057 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-combined-ca-bundle\") pod \"9b497134-1c13-450d-830a-0e0e7d51fe9d\" (UID: \"9b497134-1c13-450d-830a-0e0e7d51fe9d\") " Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.701243 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-public-tls-certs\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.701314 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-internal-tls-certs\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.701361 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ef465c53-5add-41ff-9fcc-00e714bc2bc0-run-httpd\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.701394 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ef465c53-5add-41ff-9fcc-00e714bc2bc0-log-httpd\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.701435 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-combined-ca-bundle\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.701458 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-config-data\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.701473 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6xwg\" (UniqueName: \"kubernetes.io/projected/ef465c53-5add-41ff-9fcc-00e714bc2bc0-kube-api-access-w6xwg\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.701496 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ef465c53-5add-41ff-9fcc-00e714bc2bc0-etc-swift\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.713165 4751 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ef465c53-5add-41ff-9fcc-00e714bc2bc0-run-httpd\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.713554 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ef465c53-5add-41ff-9fcc-00e714bc2bc0-log-httpd\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.714574 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b497134-1c13-450d-830a-0e0e7d51fe9d-logs" (OuterVolumeSpecName: "logs") pod "9b497134-1c13-450d-830a-0e0e7d51fe9d" (UID: "9b497134-1c13-450d-830a-0e0e7d51fe9d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.716635 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-688f5555d8-5fnpx_6340b4f1-4797-40ed-aaaa-a37e9c0cd649/neutron-httpd/0.log" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.716819 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ef465c53-5add-41ff-9fcc-00e714bc2bc0-etc-swift\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.719122 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-688f5555d8-5fnpx_6340b4f1-4797-40ed-aaaa-a37e9c0cd649/neutron-api/0.log" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.719163 4751 generic.go:334] "Generic (PLEG): container finished" podID="6340b4f1-4797-40ed-aaaa-a37e9c0cd649" containerID="bbfa9255e0aa1d090382b57faaa3d6afdae2ae8c3686c5f384c263f680ed66cc" exitCode=137 Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.719180 4751 generic.go:334] "Generic (PLEG): container finished" podID="6340b4f1-4797-40ed-aaaa-a37e9c0cd649" containerID="5ba7e76371df6100c4852bd8675183d313d8bddc2add76d62703958cd240ec1f" exitCode=137 Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.719238 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-688f5555d8-5fnpx" event={"ID":"6340b4f1-4797-40ed-aaaa-a37e9c0cd649","Type":"ContainerDied","Data":"bbfa9255e0aa1d090382b57faaa3d6afdae2ae8c3686c5f384c263f680ed66cc"} Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.719265 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-688f5555d8-5fnpx" event={"ID":"6340b4f1-4797-40ed-aaaa-a37e9c0cd649","Type":"ContainerDied","Data":"5ba7e76371df6100c4852bd8675183d313d8bddc2add76d62703958cd240ec1f"} Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.721932 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Feb 27 16:47:56 crc kubenswrapper[4751]: E0227 16:47:56.722447 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b497134-1c13-450d-830a-0e0e7d51fe9d" containerName="placement-api" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.722465 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b497134-1c13-450d-830a-0e0e7d51fe9d" containerName="placement-api" Feb 27 16:47:56 crc 
kubenswrapper[4751]: E0227 16:47:56.722519 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b497134-1c13-450d-830a-0e0e7d51fe9d" containerName="placement-log" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.722526 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b497134-1c13-450d-830a-0e0e7d51fe9d" containerName="placement-log" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.722762 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b497134-1c13-450d-830a-0e0e7d51fe9d" containerName="placement-api" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.722791 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b497134-1c13-450d-830a-0e0e7d51fe9d" containerName="placement-log" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.727727 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.727863 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-config-data\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.737223 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.737427 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-9zxsn" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.737538 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.748638 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b497134-1c13-450d-830a-0e0e7d51fe9d-kube-api-access-s2sbf" (OuterVolumeSpecName: "kube-api-access-s2sbf") pod "9b497134-1c13-450d-830a-0e0e7d51fe9d" (UID: "9b497134-1c13-450d-830a-0e0e7d51fe9d"). InnerVolumeSpecName "kube-api-access-s2sbf". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.752493 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-combined-ca-bundle\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.761145 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-internal-tls-certs\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.761417 4751 generic.go:334] "Generic (PLEG): container finished" podID="9b497134-1c13-450d-830a-0e0e7d51fe9d" containerID="d19a09bb3b1f70cc20a18d01058d0ff753c152ba1f6571f06895c1cb72184b1d" exitCode=0 Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.761541 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-c7b6db6d-k4vfr" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.761565 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-c7b6db6d-k4vfr" event={"ID":"9b497134-1c13-450d-830a-0e0e7d51fe9d","Type":"ContainerDied","Data":"d19a09bb3b1f70cc20a18d01058d0ff753c152ba1f6571f06895c1cb72184b1d"} Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.761637 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-c7b6db6d-k4vfr" event={"ID":"9b497134-1c13-450d-830a-0e0e7d51fe9d","Type":"ContainerDied","Data":"1dce7a56d726d7c785893b9517187158373995084dd91716bbfb1258da763af7"} Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.761659 4751 scope.go:117] "RemoveContainer" containerID="d19a09bb3b1f70cc20a18d01058d0ff753c152ba1f6571f06895c1cb72184b1d" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.762867 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6xwg\" (UniqueName: \"kubernetes.io/projected/ef465c53-5add-41ff-9fcc-00e714bc2bc0-kube-api-access-w6xwg\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.763729 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-scripts" (OuterVolumeSpecName: "scripts") pod "9b497134-1c13-450d-830a-0e0e7d51fe9d" (UID: "9b497134-1c13-450d-830a-0e0e7d51fe9d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.772078 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"23d98e0b-8d21-4ad9-b3a4-716c1d221949","Type":"ContainerStarted","Data":"2ecbec27a7197208f58327c1b614eb58bf364a81b50228cd0b0b7068505b1049"} Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.803366 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.803394 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s2sbf\" (UniqueName: \"kubernetes.io/projected/9b497134-1c13-450d-830a-0e0e7d51fe9d-kube-api-access-s2sbf\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.803420 4751 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b497134-1c13-450d-830a-0e0e7d51fe9d-logs\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.804736 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.817105 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.817089428 podStartE2EDuration="3.817089428s" podCreationTimestamp="2026-02-27 16:47:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:47:56.802024345 +0000 UTC m=+1438.949038792" watchObservedRunningTime="2026-02-27 16:47:56.817089428 +0000 UTC m=+1438.964103875" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.838669 4751 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-public-tls-certs\") pod \"swift-proxy-ccb964dc9-cj74q\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.873198 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.880493 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-config-data" (OuterVolumeSpecName: "config-data") pod "9b497134-1c13-450d-830a-0e0e7d51fe9d" (UID: "9b497134-1c13-450d-830a-0e0e7d51fe9d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.909804 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dv2hj\" (UniqueName: \"kubernetes.io/projected/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-kube-api-access-dv2hj\") pod \"openstackclient\" (UID: \"ed09f7f0-eafe-48c5-8124-ca6c48b2352e\") " pod="openstack/openstackclient" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.909940 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-openstack-config\") pod \"openstackclient\" (UID: \"ed09f7f0-eafe-48c5-8124-ca6c48b2352e\") " pod="openstack/openstackclient" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.910002 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-openstack-config-secret\") pod \"openstackclient\" (UID: \"ed09f7f0-eafe-48c5-8124-ca6c48b2352e\") " pod="openstack/openstackclient" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.910085 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-combined-ca-bundle\") pod \"openstackclient\" (UID: \"ed09f7f0-eafe-48c5-8124-ca6c48b2352e\") " pod="openstack/openstackclient" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.910196 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.935475 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9b497134-1c13-450d-830a-0e0e7d51fe9d" (UID: "9b497134-1c13-450d-830a-0e0e7d51fe9d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:56 crc kubenswrapper[4751]: I0227 16:47:56.974931 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:56.998342 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.017479 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.018152 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dv2hj\" (UniqueName: \"kubernetes.io/projected/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-kube-api-access-dv2hj\") pod \"openstackclient\" (UID: \"ed09f7f0-eafe-48c5-8124-ca6c48b2352e\") " pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.018242 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-openstack-config\") pod \"openstackclient\" (UID: \"ed09f7f0-eafe-48c5-8124-ca6c48b2352e\") " pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.018284 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-openstack-config-secret\") pod \"openstackclient\" (UID: \"ed09f7f0-eafe-48c5-8124-ca6c48b2352e\") " pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.018346 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-combined-ca-bundle\") pod \"openstackclient\" (UID: \"ed09f7f0-eafe-48c5-8124-ca6c48b2352e\") " pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.018497 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.018617 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.019682 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-openstack-config\") pod \"openstackclient\" (UID: \"ed09f7f0-eafe-48c5-8124-ca6c48b2352e\") " pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.021640 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-combined-ca-bundle\") pod \"openstackclient\" (UID: \"ed09f7f0-eafe-48c5-8124-ca6c48b2352e\") " pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: E0227 16:47:57.022668 4751 projected.go:194] Error preparing data for projected volume kube-api-access-dv2hj for pod openstack/openstackclient: failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (ed09f7f0-eafe-48c5-8124-ca6c48b2352e) does not match the UID in record. 
The object might have been deleted and then recreated Feb 27 16:47:57 crc kubenswrapper[4751]: E0227 16:47:57.022759 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-kube-api-access-dv2hj podName:ed09f7f0-eafe-48c5-8124-ca6c48b2352e nodeName:}" failed. No retries permitted until 2026-02-27 16:47:57.522739891 +0000 UTC m=+1439.669754338 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-dv2hj" (UniqueName: "kubernetes.io/projected/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-kube-api-access-dv2hj") pod "openstackclient" (UID: "ed09f7f0-eafe-48c5-8124-ca6c48b2352e") : failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (ed09f7f0-eafe-48c5-8124-ca6c48b2352e) does not match the UID in record. The object might have been deleted and then recreated Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.035803 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.047874 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-openstack-config-secret\") pod \"openstackclient\" (UID: \"ed09f7f0-eafe-48c5-8124-ca6c48b2352e\") " pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.059130 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "9b497134-1c13-450d-830a-0e0e7d51fe9d" (UID: "9b497134-1c13-450d-830a-0e0e7d51fe9d"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:57 crc kubenswrapper[4751]: E0227 16:47:57.082039 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-dv2hj], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/openstackclient" podUID="ed09f7f0-eafe-48c5-8124-ca6c48b2352e" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.088346 4751 scope.go:117] "RemoveContainer" containerID="ac69e5335b42e47a46aedf872ab39c51b4ad64fca410faa17d57c61e9ea2ae1e" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.109010 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-688f5555d8-5fnpx_6340b4f1-4797-40ed-aaaa-a37e9c0cd649/neutron-httpd/0.log" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.122540 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/176ca33f-0a66-4132-bdf1-4be84eba5b34-openstack-config-secret\") pod \"openstackclient\" (UID: \"176ca33f-0a66-4132-bdf1-4be84eba5b34\") " pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.122625 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jm8k\" (UniqueName: \"kubernetes.io/projected/176ca33f-0a66-4132-bdf1-4be84eba5b34-kube-api-access-8jm8k\") pod \"openstackclient\" (UID: \"176ca33f-0a66-4132-bdf1-4be84eba5b34\") " pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.122656 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/176ca33f-0a66-4132-bdf1-4be84eba5b34-combined-ca-bundle\") pod \"openstackclient\" (UID: \"176ca33f-0a66-4132-bdf1-4be84eba5b34\") " pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.122725 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/176ca33f-0a66-4132-bdf1-4be84eba5b34-openstack-config\") pod \"openstackclient\" (UID: \"176ca33f-0a66-4132-bdf1-4be84eba5b34\") " pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.122769 4751 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.123056 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-688f5555d8-5fnpx_6340b4f1-4797-40ed-aaaa-a37e9c0cd649/neutron-api/0.log" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.123123 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-688f5555d8-5fnpx" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.161122 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "9b497134-1c13-450d-830a-0e0e7d51fe9d" (UID: "9b497134-1c13-450d-830a-0e0e7d51fe9d"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.161281 4751 scope.go:117] "RemoveContainer" containerID="d19a09bb3b1f70cc20a18d01058d0ff753c152ba1f6571f06895c1cb72184b1d" Feb 27 16:47:57 crc kubenswrapper[4751]: E0227 16:47:57.166372 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d19a09bb3b1f70cc20a18d01058d0ff753c152ba1f6571f06895c1cb72184b1d\": container with ID starting with d19a09bb3b1f70cc20a18d01058d0ff753c152ba1f6571f06895c1cb72184b1d not found: ID does not exist" containerID="d19a09bb3b1f70cc20a18d01058d0ff753c152ba1f6571f06895c1cb72184b1d" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.166425 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d19a09bb3b1f70cc20a18d01058d0ff753c152ba1f6571f06895c1cb72184b1d"} err="failed to get container status \"d19a09bb3b1f70cc20a18d01058d0ff753c152ba1f6571f06895c1cb72184b1d\": rpc error: code = NotFound desc = could not find container \"d19a09bb3b1f70cc20a18d01058d0ff753c152ba1f6571f06895c1cb72184b1d\": container with ID starting with d19a09bb3b1f70cc20a18d01058d0ff753c152ba1f6571f06895c1cb72184b1d not found: ID does not exist" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.166448 4751 scope.go:117] "RemoveContainer" containerID="ac69e5335b42e47a46aedf872ab39c51b4ad64fca410faa17d57c61e9ea2ae1e" Feb 27 16:47:57 crc kubenswrapper[4751]: E0227 16:47:57.166986 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac69e5335b42e47a46aedf872ab39c51b4ad64fca410faa17d57c61e9ea2ae1e\": container with ID starting with ac69e5335b42e47a46aedf872ab39c51b4ad64fca410faa17d57c61e9ea2ae1e not found: ID does not exist" containerID="ac69e5335b42e47a46aedf872ab39c51b4ad64fca410faa17d57c61e9ea2ae1e" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.167006 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac69e5335b42e47a46aedf872ab39c51b4ad64fca410faa17d57c61e9ea2ae1e"} err="failed to get container status \"ac69e5335b42e47a46aedf872ab39c51b4ad64fca410faa17d57c61e9ea2ae1e\": rpc error: code = NotFound desc = could not find container \"ac69e5335b42e47a46aedf872ab39c51b4ad64fca410faa17d57c61e9ea2ae1e\": container with ID starting with ac69e5335b42e47a46aedf872ab39c51b4ad64fca410faa17d57c61e9ea2ae1e not found: ID does not exist" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.225631 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-httpd-config\") pod \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\" (UID: \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\") " Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.225714 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-ovndb-tls-certs\") pod \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\" (UID: \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\") " Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.225741 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vxbvv\" (UniqueName: \"kubernetes.io/projected/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-kube-api-access-vxbvv\") pod \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\" (UID: 
\"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\") " Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.225797 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-config\") pod \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\" (UID: \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\") " Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.225864 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-combined-ca-bundle\") pod \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\" (UID: \"6340b4f1-4797-40ed-aaaa-a37e9c0cd649\") " Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.226279 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/176ca33f-0a66-4132-bdf1-4be84eba5b34-openstack-config-secret\") pod \"openstackclient\" (UID: \"176ca33f-0a66-4132-bdf1-4be84eba5b34\") " pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.226413 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jm8k\" (UniqueName: \"kubernetes.io/projected/176ca33f-0a66-4132-bdf1-4be84eba5b34-kube-api-access-8jm8k\") pod \"openstackclient\" (UID: \"176ca33f-0a66-4132-bdf1-4be84eba5b34\") " pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.226453 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/176ca33f-0a66-4132-bdf1-4be84eba5b34-combined-ca-bundle\") pod \"openstackclient\" (UID: \"176ca33f-0a66-4132-bdf1-4be84eba5b34\") " pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.226533 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/176ca33f-0a66-4132-bdf1-4be84eba5b34-openstack-config\") pod \"openstackclient\" (UID: \"176ca33f-0a66-4132-bdf1-4be84eba5b34\") " pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.226987 4751 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b497134-1c13-450d-830a-0e0e7d51fe9d-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.227987 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/176ca33f-0a66-4132-bdf1-4be84eba5b34-openstack-config\") pod \"openstackclient\" (UID: \"176ca33f-0a66-4132-bdf1-4be84eba5b34\") " pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.234964 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/176ca33f-0a66-4132-bdf1-4be84eba5b34-openstack-config-secret\") pod \"openstackclient\" (UID: \"176ca33f-0a66-4132-bdf1-4be84eba5b34\") " pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.237251 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-kube-api-access-vxbvv" (OuterVolumeSpecName: "kube-api-access-vxbvv") pod "6340b4f1-4797-40ed-aaaa-a37e9c0cd649" (UID: 
"6340b4f1-4797-40ed-aaaa-a37e9c0cd649"). InnerVolumeSpecName "kube-api-access-vxbvv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.243546 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "6340b4f1-4797-40ed-aaaa-a37e9c0cd649" (UID: "6340b4f1-4797-40ed-aaaa-a37e9c0cd649"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.252079 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/176ca33f-0a66-4132-bdf1-4be84eba5b34-combined-ca-bundle\") pod \"openstackclient\" (UID: \"176ca33f-0a66-4132-bdf1-4be84eba5b34\") " pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.264525 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jm8k\" (UniqueName: \"kubernetes.io/projected/176ca33f-0a66-4132-bdf1-4be84eba5b34-kube-api-access-8jm8k\") pod \"openstackclient\" (UID: \"176ca33f-0a66-4132-bdf1-4be84eba5b34\") " pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.328626 4751 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-httpd-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.328660 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vxbvv\" (UniqueName: \"kubernetes.io/projected/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-kube-api-access-vxbvv\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.334683 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "6340b4f1-4797-40ed-aaaa-a37e9c0cd649" (UID: "6340b4f1-4797-40ed-aaaa-a37e9c0cd649"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.335313 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.342175 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6340b4f1-4797-40ed-aaaa-a37e9c0cd649" (UID: "6340b4f1-4797-40ed-aaaa-a37e9c0cd649"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.350315 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-config" (OuterVolumeSpecName: "config") pod "6340b4f1-4797-40ed-aaaa-a37e9c0cd649" (UID: "6340b4f1-4797-40ed-aaaa-a37e9c0cd649"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.407836 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.417891 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-c7b6db6d-k4vfr"] Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.428148 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-c7b6db6d-k4vfr"] Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.430803 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.430826 4751 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.430836 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/6340b4f1-4797-40ed-aaaa-a37e9c0cd649-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.532587 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dv2hj\" (UniqueName: \"kubernetes.io/projected/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-kube-api-access-dv2hj\") pod \"openstackclient\" (UID: \"ed09f7f0-eafe-48c5-8124-ca6c48b2352e\") " pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: E0227 16:47:57.534918 4751 projected.go:194] Error preparing data for projected volume kube-api-access-dv2hj for pod openstack/openstackclient: failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (ed09f7f0-eafe-48c5-8124-ca6c48b2352e) does not match the UID in record. The object might have been deleted and then recreated Feb 27 16:47:57 crc kubenswrapper[4751]: E0227 16:47:57.534958 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-kube-api-access-dv2hj podName:ed09f7f0-eafe-48c5-8124-ca6c48b2352e nodeName:}" failed. No retries permitted until 2026-02-27 16:47:58.534945524 +0000 UTC m=+1440.681959971 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-dv2hj" (UniqueName: "kubernetes.io/projected/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-kube-api-access-dv2hj") pod "openstackclient" (UID: "ed09f7f0-eafe-48c5-8124-ca6c48b2352e") : failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (ed09f7f0-eafe-48c5-8124-ca6c48b2352e) does not match the UID in record. 
The object might have been deleted and then recreated Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.557408 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-ccb964dc9-cj74q"] Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.791054 4751 generic.go:334] "Generic (PLEG): container finished" podID="1b23ce3a-f761-42a6-a08c-34f81bd2a8fe" containerID="76b78dde5323d728cf0176c3ec46089efcdee6853d62d69a7eea78cb609893bd" exitCode=0 Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.792235 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-78bc7f9d5b-gzgqp" event={"ID":"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe","Type":"ContainerDied","Data":"76b78dde5323d728cf0176c3ec46089efcdee6853d62d69a7eea78cb609893bd"} Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.805172 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-ccb964dc9-cj74q" event={"ID":"ef465c53-5add-41ff-9fcc-00e714bc2bc0","Type":"ContainerStarted","Data":"6172eb80202c750be3ee41ecfc973c9f136b8ecf00b92047157e23d3dc01529f"} Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.815202 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-688f5555d8-5fnpx_6340b4f1-4797-40ed-aaaa-a37e9c0cd649/neutron-httpd/0.log" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.819442 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-688f5555d8-5fnpx_6340b4f1-4797-40ed-aaaa-a37e9c0cd649/neutron-api/0.log" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.819685 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-688f5555d8-5fnpx" event={"ID":"6340b4f1-4797-40ed-aaaa-a37e9c0cd649","Type":"ContainerDied","Data":"5476f9e5d47ee9749395d754923eebab918851f6cec44079faca0b1021e3062d"} Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.819803 4751 scope.go:117] "RemoveContainer" containerID="bbfa9255e0aa1d090382b57faaa3d6afdae2ae8c3686c5f384c263f680ed66cc" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.819993 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-688f5555d8-5fnpx" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.858978 4751 generic.go:334] "Generic (PLEG): container finished" podID="cd8468ed-ddbc-411a-9d7c-931e4962aed7" containerID="729ce1054b27f73b6a024fc676fad5540516a8af8874ef070a3dca545c4854b9" exitCode=0 Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.859214 4751 generic.go:334] "Generic (PLEG): container finished" podID="cd8468ed-ddbc-411a-9d7c-931e4962aed7" containerID="4c2ba9196fe61f9a742bb05acc42e633d0308530bb30967e5c81e901a8783cc1" exitCode=2 Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.859327 4751 generic.go:334] "Generic (PLEG): container finished" podID="cd8468ed-ddbc-411a-9d7c-931e4962aed7" containerID="017219752f262c04392a06c79b3e5b23984099b62d57a9ab1a8e9564ebfc5f9a" exitCode=0 Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.859431 4751 generic.go:334] "Generic (PLEG): container finished" podID="cd8468ed-ddbc-411a-9d7c-931e4962aed7" containerID="e70c5a9c24c4c153e83571094fa186b2ff0ea4a949287b7b9727cb596b621d80" exitCode=0 Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.859613 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.860285 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cd8468ed-ddbc-411a-9d7c-931e4962aed7","Type":"ContainerDied","Data":"729ce1054b27f73b6a024fc676fad5540516a8af8874ef070a3dca545c4854b9"} Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.860495 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cd8468ed-ddbc-411a-9d7c-931e4962aed7","Type":"ContainerDied","Data":"4c2ba9196fe61f9a742bb05acc42e633d0308530bb30967e5c81e901a8783cc1"} Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.860646 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cd8468ed-ddbc-411a-9d7c-931e4962aed7","Type":"ContainerDied","Data":"017219752f262c04392a06c79b3e5b23984099b62d57a9ab1a8e9564ebfc5f9a"} Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.860739 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cd8468ed-ddbc-411a-9d7c-931e4962aed7","Type":"ContainerDied","Data":"e70c5a9c24c4c153e83571094fa186b2ff0ea4a949287b7b9727cb596b621d80"} Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.868666 4751 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="ed09f7f0-eafe-48c5-8124-ca6c48b2352e" podUID="176ca33f-0a66-4132-bdf1-4be84eba5b34" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.905958 4751 scope.go:117] "RemoveContainer" containerID="5ba7e76371df6100c4852bd8675183d313d8bddc2add76d62703958cd240ec1f" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.921813 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.925231 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.943566 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-688f5555d8-5fnpx"] Feb 27 16:47:57 crc kubenswrapper[4751]: I0227 16:47:57.950579 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-688f5555d8-5fnpx"] Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.045977 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cd8468ed-ddbc-411a-9d7c-931e4962aed7-log-httpd\") pod \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.046024 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-scripts\") pod \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.046045 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-openstack-config\") pod \"ed09f7f0-eafe-48c5-8124-ca6c48b2352e\" (UID: \"ed09f7f0-eafe-48c5-8124-ca6c48b2352e\") " Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.046066 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-config-data\") pod \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.046129 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-sg-core-conf-yaml\") pod \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.046156 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cd8468ed-ddbc-411a-9d7c-931e4962aed7-run-httpd\") pod \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.046202 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-combined-ca-bundle\") pod \"ed09f7f0-eafe-48c5-8124-ca6c48b2352e\" (UID: \"ed09f7f0-eafe-48c5-8124-ca6c48b2352e\") " Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.046222 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-openstack-config-secret\") pod \"ed09f7f0-eafe-48c5-8124-ca6c48b2352e\" (UID: \"ed09f7f0-eafe-48c5-8124-ca6c48b2352e\") " Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.046254 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-662jh\" (UniqueName: \"kubernetes.io/projected/cd8468ed-ddbc-411a-9d7c-931e4962aed7-kube-api-access-662jh\") pod \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\" (UID: 
\"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.046334 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-combined-ca-bundle\") pod \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\" (UID: \"cd8468ed-ddbc-411a-9d7c-931e4962aed7\") " Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.046746 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dv2hj\" (UniqueName: \"kubernetes.io/projected/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-kube-api-access-dv2hj\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.052830 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.058119 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd8468ed-ddbc-411a-9d7c-931e4962aed7-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "cd8468ed-ddbc-411a-9d7c-931e4962aed7" (UID: "cd8468ed-ddbc-411a-9d7c-931e4962aed7"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.060050 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd8468ed-ddbc-411a-9d7c-931e4962aed7-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "cd8468ed-ddbc-411a-9d7c-931e4962aed7" (UID: "cd8468ed-ddbc-411a-9d7c-931e4962aed7"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.060223 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "ed09f7f0-eafe-48c5-8124-ca6c48b2352e" (UID: "ed09f7f0-eafe-48c5-8124-ca6c48b2352e"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.065895 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ed09f7f0-eafe-48c5-8124-ca6c48b2352e" (UID: "ed09f7f0-eafe-48c5-8124-ca6c48b2352e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.067617 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-scripts" (OuterVolumeSpecName: "scripts") pod "cd8468ed-ddbc-411a-9d7c-931e4962aed7" (UID: "cd8468ed-ddbc-411a-9d7c-931e4962aed7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.076578 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd8468ed-ddbc-411a-9d7c-931e4962aed7-kube-api-access-662jh" (OuterVolumeSpecName: "kube-api-access-662jh") pod "cd8468ed-ddbc-411a-9d7c-931e4962aed7" (UID: "cd8468ed-ddbc-411a-9d7c-931e4962aed7"). InnerVolumeSpecName "kube-api-access-662jh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.083473 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "ed09f7f0-eafe-48c5-8124-ca6c48b2352e" (UID: "ed09f7f0-eafe-48c5-8124-ca6c48b2352e"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.148232 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "cd8468ed-ddbc-411a-9d7c-931e4962aed7" (UID: "cd8468ed-ddbc-411a-9d7c-931e4962aed7"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.172644 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.177119 4751 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.184527 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-662jh\" (UniqueName: \"kubernetes.io/projected/cd8468ed-ddbc-411a-9d7c-931e4962aed7-kube-api-access-662jh\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.184657 4751 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cd8468ed-ddbc-411a-9d7c-931e4962aed7-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.184731 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.186162 4751 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/ed09f7f0-eafe-48c5-8124-ca6c48b2352e-openstack-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.186252 4751 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.186318 4751 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cd8468ed-ddbc-411a-9d7c-931e4962aed7-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.192259 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cd8468ed-ddbc-411a-9d7c-931e4962aed7" (UID: "cd8468ed-ddbc-411a-9d7c-931e4962aed7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.288507 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.308530 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-config-data" (OuterVolumeSpecName: "config-data") pod "cd8468ed-ddbc-411a-9d7c-931e4962aed7" (UID: "cd8468ed-ddbc-411a-9d7c-931e4962aed7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.389705 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd8468ed-ddbc-411a-9d7c-931e4962aed7-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.525088 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-78bc7f9d5b-gzgqp" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.529607 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6340b4f1-4797-40ed-aaaa-a37e9c0cd649" path="/var/lib/kubelet/pods/6340b4f1-4797-40ed-aaaa-a37e9c0cd649/volumes" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.530210 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b497134-1c13-450d-830a-0e0e7d51fe9d" path="/var/lib/kubelet/pods/9b497134-1c13-450d-830a-0e0e7d51fe9d/volumes" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.530836 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed09f7f0-eafe-48c5-8124-ca6c48b2352e" path="/var/lib/kubelet/pods/ed09f7f0-eafe-48c5-8124-ca6c48b2352e/volumes" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.593718 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-config\") pod \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\" (UID: \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\") " Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.594092 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-httpd-config\") pod \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\" (UID: \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\") " Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.594248 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-combined-ca-bundle\") pod \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\" (UID: \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\") " Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.594351 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-ovndb-tls-certs\") pod \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\" (UID: \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\") " Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.594496 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-49fvw\" (UniqueName: 
\"kubernetes.io/projected/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-kube-api-access-49fvw\") pod \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\" (UID: \"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe\") " Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.600136 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-kube-api-access-49fvw" (OuterVolumeSpecName: "kube-api-access-49fvw") pod "1b23ce3a-f761-42a6-a08c-34f81bd2a8fe" (UID: "1b23ce3a-f761-42a6-a08c-34f81bd2a8fe"). InnerVolumeSpecName "kube-api-access-49fvw". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.608168 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "1b23ce3a-f761-42a6-a08c-34f81bd2a8fe" (UID: "1b23ce3a-f761-42a6-a08c-34f81bd2a8fe"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.653498 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1b23ce3a-f761-42a6-a08c-34f81bd2a8fe" (UID: "1b23ce3a-f761-42a6-a08c-34f81bd2a8fe"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.696429 4751 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-httpd-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.696677 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.696713 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-49fvw\" (UniqueName: \"kubernetes.io/projected/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-kube-api-access-49fvw\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.698539 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "1b23ce3a-f761-42a6-a08c-34f81bd2a8fe" (UID: "1b23ce3a-f761-42a6-a08c-34f81bd2a8fe"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.730533 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-config" (OuterVolumeSpecName: "config") pod "1b23ce3a-f761-42a6-a08c-34f81bd2a8fe" (UID: "1b23ce3a-f761-42a6-a08c-34f81bd2a8fe"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.799274 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.799312 4751 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.872559 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-78bc7f9d5b-gzgqp" event={"ID":"1b23ce3a-f761-42a6-a08c-34f81bd2a8fe","Type":"ContainerDied","Data":"9851b66786bceae07be853a8e8f0a1b51ea2084c8757422aaa819e630d62e1d4"} Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.872608 4751 scope.go:117] "RemoveContainer" containerID="627b5af683b614bccc6f8a8ba3d9f8d587a67e596d3eda586ddd8cd0c89ca727" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.872734 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-78bc7f9d5b-gzgqp" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.883374 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-ccb964dc9-cj74q" event={"ID":"ef465c53-5add-41ff-9fcc-00e714bc2bc0","Type":"ContainerStarted","Data":"668b3715f8d8476a3e1d9d7443b5adb7e8ae4b4b6eac2c5be4a3dc6216b3c24f"} Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.883443 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-ccb964dc9-cj74q" event={"ID":"ef465c53-5add-41ff-9fcc-00e714bc2bc0","Type":"ContainerStarted","Data":"843802b514320212732f6a6e3503b615909bac4f2d8f4d4458b80f0b1046f521"} Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.883639 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.883666 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.891863 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"176ca33f-0a66-4132-bdf1-4be84eba5b34","Type":"ContainerStarted","Data":"625bff4cc80cb2c66273975019f2115f60a8551f8bfcfd9f6c97ecc2ec9d0ae3"} Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.908013 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-ccb964dc9-cj74q" podStartSLOduration=2.907995912 podStartE2EDuration="2.907995912s" podCreationTimestamp="2026-02-27 16:47:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:47:58.90380978 +0000 UTC m=+1441.050824227" watchObservedRunningTime="2026-02-27 16:47:58.907995912 +0000 UTC m=+1441.055010359" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.925727 4751 scope.go:117] "RemoveContainer" containerID="76b78dde5323d728cf0176c3ec46089efcdee6853d62d69a7eea78cb609893bd" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.928362 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-78bc7f9d5b-gzgqp"] Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.935143 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/neutron-78bc7f9d5b-gzgqp"] Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.946606 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.946637 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.946676 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cd8468ed-ddbc-411a-9d7c-931e4962aed7","Type":"ContainerDied","Data":"0e5afbc0f8cf3a0e0d85f77879d2485c73077546e61a791ee8c07cdfe948bba1"} Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.970281 4751 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="ed09f7f0-eafe-48c5-8124-ca6c48b2352e" podUID="176ca33f-0a66-4132-bdf1-4be84eba5b34" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.976433 4751 scope.go:117] "RemoveContainer" containerID="729ce1054b27f73b6a024fc676fad5540516a8af8874ef070a3dca545c4854b9" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.980647 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.992907 4751 scope.go:117] "RemoveContainer" containerID="4c2ba9196fe61f9a742bb05acc42e633d0308530bb30967e5c81e901a8783cc1" Feb 27 16:47:58 crc kubenswrapper[4751]: I0227 16:47:58.994979 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.005298 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:47:59 crc kubenswrapper[4751]: E0227 16:47:59.005714 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd8468ed-ddbc-411a-9d7c-931e4962aed7" containerName="ceilometer-central-agent" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.005734 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd8468ed-ddbc-411a-9d7c-931e4962aed7" containerName="ceilometer-central-agent" Feb 27 16:47:59 crc kubenswrapper[4751]: E0227 16:47:59.005750 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd8468ed-ddbc-411a-9d7c-931e4962aed7" containerName="sg-core" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.005758 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd8468ed-ddbc-411a-9d7c-931e4962aed7" containerName="sg-core" Feb 27 16:47:59 crc kubenswrapper[4751]: E0227 16:47:59.005764 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd8468ed-ddbc-411a-9d7c-931e4962aed7" containerName="proxy-httpd" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.005771 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd8468ed-ddbc-411a-9d7c-931e4962aed7" containerName="proxy-httpd" Feb 27 16:47:59 crc kubenswrapper[4751]: E0227 16:47:59.005790 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6340b4f1-4797-40ed-aaaa-a37e9c0cd649" containerName="neutron-api" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.005798 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="6340b4f1-4797-40ed-aaaa-a37e9c0cd649" containerName="neutron-api" Feb 27 16:47:59 crc kubenswrapper[4751]: E0227 16:47:59.005811 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6340b4f1-4797-40ed-aaaa-a37e9c0cd649" containerName="neutron-httpd" Feb 27 
16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.005819 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="6340b4f1-4797-40ed-aaaa-a37e9c0cd649" containerName="neutron-httpd" Feb 27 16:47:59 crc kubenswrapper[4751]: E0227 16:47:59.005836 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd8468ed-ddbc-411a-9d7c-931e4962aed7" containerName="ceilometer-notification-agent" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.005843 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd8468ed-ddbc-411a-9d7c-931e4962aed7" containerName="ceilometer-notification-agent" Feb 27 16:47:59 crc kubenswrapper[4751]: E0227 16:47:59.005858 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b23ce3a-f761-42a6-a08c-34f81bd2a8fe" containerName="neutron-api" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.005864 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b23ce3a-f761-42a6-a08c-34f81bd2a8fe" containerName="neutron-api" Feb 27 16:47:59 crc kubenswrapper[4751]: E0227 16:47:59.005874 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b23ce3a-f761-42a6-a08c-34f81bd2a8fe" containerName="neutron-httpd" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.005880 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b23ce3a-f761-42a6-a08c-34f81bd2a8fe" containerName="neutron-httpd" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.006045 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="6340b4f1-4797-40ed-aaaa-a37e9c0cd649" containerName="neutron-httpd" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.006056 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b23ce3a-f761-42a6-a08c-34f81bd2a8fe" containerName="neutron-httpd" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.006066 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd8468ed-ddbc-411a-9d7c-931e4962aed7" containerName="ceilometer-central-agent" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.006076 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="6340b4f1-4797-40ed-aaaa-a37e9c0cd649" containerName="neutron-api" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.006092 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd8468ed-ddbc-411a-9d7c-931e4962aed7" containerName="sg-core" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.006105 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b23ce3a-f761-42a6-a08c-34f81bd2a8fe" containerName="neutron-api" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.006115 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd8468ed-ddbc-411a-9d7c-931e4962aed7" containerName="ceilometer-notification-agent" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.006127 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd8468ed-ddbc-411a-9d7c-931e4962aed7" containerName="proxy-httpd" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.007674 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.011498 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.011753 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.020520 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.040772 4751 scope.go:117] "RemoveContainer" containerID="017219752f262c04392a06c79b3e5b23984099b62d57a9ab1a8e9564ebfc5f9a" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.062038 4751 scope.go:117] "RemoveContainer" containerID="e70c5a9c24c4c153e83571094fa186b2ff0ea4a949287b7b9727cb596b621d80" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.104504 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " pod="openstack/ceilometer-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.104574 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-config-data\") pod \"ceilometer-0\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " pod="openstack/ceilometer-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.104600 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tg87x\" (UniqueName: \"kubernetes.io/projected/4606f586-a84c-41f8-bdae-13bba1e15c46-kube-api-access-tg87x\") pod \"ceilometer-0\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " pod="openstack/ceilometer-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.104621 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-scripts\") pod \"ceilometer-0\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " pod="openstack/ceilometer-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.104647 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4606f586-a84c-41f8-bdae-13bba1e15c46-log-httpd\") pod \"ceilometer-0\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " pod="openstack/ceilometer-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.104659 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4606f586-a84c-41f8-bdae-13bba1e15c46-run-httpd\") pod \"ceilometer-0\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " pod="openstack/ceilometer-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.104681 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " pod="openstack/ceilometer-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 
16:47:59.211133 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-config-data\") pod \"ceilometer-0\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " pod="openstack/ceilometer-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.211198 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tg87x\" (UniqueName: \"kubernetes.io/projected/4606f586-a84c-41f8-bdae-13bba1e15c46-kube-api-access-tg87x\") pod \"ceilometer-0\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " pod="openstack/ceilometer-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.211244 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-scripts\") pod \"ceilometer-0\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " pod="openstack/ceilometer-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.211286 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4606f586-a84c-41f8-bdae-13bba1e15c46-log-httpd\") pod \"ceilometer-0\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " pod="openstack/ceilometer-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.211310 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4606f586-a84c-41f8-bdae-13bba1e15c46-run-httpd\") pod \"ceilometer-0\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " pod="openstack/ceilometer-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.211347 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " pod="openstack/ceilometer-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.211552 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " pod="openstack/ceilometer-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.212512 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4606f586-a84c-41f8-bdae-13bba1e15c46-run-httpd\") pod \"ceilometer-0\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " pod="openstack/ceilometer-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.212842 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4606f586-a84c-41f8-bdae-13bba1e15c46-log-httpd\") pod \"ceilometer-0\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " pod="openstack/ceilometer-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.215326 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-scripts\") pod \"ceilometer-0\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " pod="openstack/ceilometer-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.216023 4751 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " pod="openstack/ceilometer-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.216076 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " pod="openstack/ceilometer-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.216722 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-config-data\") pod \"ceilometer-0\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " pod="openstack/ceilometer-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.243060 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tg87x\" (UniqueName: \"kubernetes.io/projected/4606f586-a84c-41f8-bdae-13bba1e15c46-kube-api-access-tg87x\") pod \"ceilometer-0\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " pod="openstack/ceilometer-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.281483 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.325927 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.778527 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.903793 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7659f547c6-hgdwt" podUID="1e512657-d0e1-4289-b430-0fc78d20aca7" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.166:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.903852 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7659f547c6-hgdwt" podUID="1e512657-d0e1-4289-b430-0fc78d20aca7" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.166:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 27 16:47:59 crc kubenswrapper[4751]: I0227 16:47:59.979651 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4606f586-a84c-41f8-bdae-13bba1e15c46","Type":"ContainerStarted","Data":"25408a6116e6172509fa9aacb7476496bfa280645cb87d34d7c1afc2b4aa6676"} Feb 27 16:48:00 crc kubenswrapper[4751]: I0227 16:48:00.130745 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536848-k5qt8"] Feb 27 16:48:00 crc kubenswrapper[4751]: I0227 16:48:00.136822 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536848-k5qt8" Feb 27 16:48:00 crc kubenswrapper[4751]: I0227 16:48:00.141238 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 16:48:00 crc kubenswrapper[4751]: I0227 16:48:00.141479 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 16:48:00 crc kubenswrapper[4751]: I0227 16:48:00.141584 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 16:48:00 crc kubenswrapper[4751]: I0227 16:48:00.150129 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536848-k5qt8"] Feb 27 16:48:00 crc kubenswrapper[4751]: E0227 16:48:00.163877 4751 log.go:32] "ImageFsInfo from image service failed" err="rpc error: code = Unknown desc = get image fs info unable to get usage for /var/lib/containers/storage/overlay-images: get disk usage for path /var/lib/containers/storage/overlay-images: lstat /var/lib/containers/storage/overlay-images/.tmp-images.json756896645: no such file or directory" Feb 27 16:48:00 crc kubenswrapper[4751]: E0227 16:48:00.163953 4751 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get imageFs stats: missing image stats: nil" Feb 27 16:48:00 crc kubenswrapper[4751]: I0227 16:48:00.333320 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzhbv\" (UniqueName: \"kubernetes.io/projected/9fae513e-32f8-4116-a05a-a4cbc62853ec-kube-api-access-tzhbv\") pod \"auto-csr-approver-29536848-k5qt8\" (UID: \"9fae513e-32f8-4116-a05a-a4cbc62853ec\") " pod="openshift-infra/auto-csr-approver-29536848-k5qt8" Feb 27 16:48:00 crc kubenswrapper[4751]: I0227 16:48:00.435118 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzhbv\" (UniqueName: \"kubernetes.io/projected/9fae513e-32f8-4116-a05a-a4cbc62853ec-kube-api-access-tzhbv\") pod \"auto-csr-approver-29536848-k5qt8\" (UID: \"9fae513e-32f8-4116-a05a-a4cbc62853ec\") " pod="openshift-infra/auto-csr-approver-29536848-k5qt8" Feb 27 16:48:00 crc kubenswrapper[4751]: I0227 16:48:00.455245 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzhbv\" (UniqueName: \"kubernetes.io/projected/9fae513e-32f8-4116-a05a-a4cbc62853ec-kube-api-access-tzhbv\") pod \"auto-csr-approver-29536848-k5qt8\" (UID: \"9fae513e-32f8-4116-a05a-a4cbc62853ec\") " pod="openshift-infra/auto-csr-approver-29536848-k5qt8" Feb 27 16:48:00 crc kubenswrapper[4751]: I0227 16:48:00.457439 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536848-k5qt8" Feb 27 16:48:00 crc kubenswrapper[4751]: I0227 16:48:00.535129 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b23ce3a-f761-42a6-a08c-34f81bd2a8fe" path="/var/lib/kubelet/pods/1b23ce3a-f761-42a6-a08c-34f81bd2a8fe/volumes" Feb 27 16:48:00 crc kubenswrapper[4751]: I0227 16:48:00.544528 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd8468ed-ddbc-411a-9d7c-931e4962aed7" path="/var/lib/kubelet/pods/cd8468ed-ddbc-411a-9d7c-931e4962aed7/volumes" Feb 27 16:48:00 crc kubenswrapper[4751]: I0227 16:48:00.910533 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536848-k5qt8"] Feb 27 16:48:00 crc kubenswrapper[4751]: W0227 16:48:00.925340 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9fae513e_32f8_4116_a05a_a4cbc62853ec.slice/crio-699a425fe132a09d98777204c998c08c8e083a38cbfa181a911927db77077fc3 WatchSource:0}: Error finding container 699a425fe132a09d98777204c998c08c8e083a38cbfa181a911927db77077fc3: Status 404 returned error can't find the container with id 699a425fe132a09d98777204c998c08c8e083a38cbfa181a911927db77077fc3 Feb 27 16:48:00 crc kubenswrapper[4751]: I0227 16:48:00.988132 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536848-k5qt8" event={"ID":"9fae513e-32f8-4116-a05a-a4cbc62853ec","Type":"ContainerStarted","Data":"699a425fe132a09d98777204c998c08c8e083a38cbfa181a911927db77077fc3"} Feb 27 16:48:00 crc kubenswrapper[4751]: I0227 16:48:00.990667 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4606f586-a84c-41f8-bdae-13bba1e15c46","Type":"ContainerStarted","Data":"7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9"} Feb 27 16:48:02 crc kubenswrapper[4751]: I0227 16:48:02.012832 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4606f586-a84c-41f8-bdae-13bba1e15c46","Type":"ContainerStarted","Data":"dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb"} Feb 27 16:48:02 crc kubenswrapper[4751]: I0227 16:48:02.012882 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4606f586-a84c-41f8-bdae-13bba1e15c46","Type":"ContainerStarted","Data":"0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d"} Feb 27 16:48:03 crc kubenswrapper[4751]: I0227 16:48:03.025945 4751 generic.go:334] "Generic (PLEG): container finished" podID="9fae513e-32f8-4116-a05a-a4cbc62853ec" containerID="3201776196c961d8ac7518f71ad1a5faf58e200bef29f468b4b7c5610c3cb1c5" exitCode=0 Feb 27 16:48:03 crc kubenswrapper[4751]: I0227 16:48:03.026251 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536848-k5qt8" event={"ID":"9fae513e-32f8-4116-a05a-a4cbc62853ec","Type":"ContainerDied","Data":"3201776196c961d8ac7518f71ad1a5faf58e200bef29f468b4b7c5610c3cb1c5"} Feb 27 16:48:04 crc kubenswrapper[4751]: I0227 16:48:04.514186 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Feb 27 16:48:06 crc kubenswrapper[4751]: I0227 16:48:06.740995 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:48:06 crc kubenswrapper[4751]: I0227 16:48:06.880647 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:48:06 crc kubenswrapper[4751]: I0227 16:48:06.882595 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:48:09 crc kubenswrapper[4751]: I0227 16:48:09.459460 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536848-k5qt8" Feb 27 16:48:09 crc kubenswrapper[4751]: I0227 16:48:09.605442 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tzhbv\" (UniqueName: \"kubernetes.io/projected/9fae513e-32f8-4116-a05a-a4cbc62853ec-kube-api-access-tzhbv\") pod \"9fae513e-32f8-4116-a05a-a4cbc62853ec\" (UID: \"9fae513e-32f8-4116-a05a-a4cbc62853ec\") " Feb 27 16:48:09 crc kubenswrapper[4751]: I0227 16:48:09.610787 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9fae513e-32f8-4116-a05a-a4cbc62853ec-kube-api-access-tzhbv" (OuterVolumeSpecName: "kube-api-access-tzhbv") pod "9fae513e-32f8-4116-a05a-a4cbc62853ec" (UID: "9fae513e-32f8-4116-a05a-a4cbc62853ec"). InnerVolumeSpecName "kube-api-access-tzhbv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:48:09 crc kubenswrapper[4751]: I0227 16:48:09.707887 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tzhbv\" (UniqueName: \"kubernetes.io/projected/9fae513e-32f8-4116-a05a-a4cbc62853ec-kube-api-access-tzhbv\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:10 crc kubenswrapper[4751]: I0227 16:48:10.096650 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"176ca33f-0a66-4132-bdf1-4be84eba5b34","Type":"ContainerStarted","Data":"92998f2cdf2e9c4313aa0a3ab7697b4d34436e6d3fd22cb366615eb354f3f91a"} Feb 27 16:48:10 crc kubenswrapper[4751]: I0227 16:48:10.099523 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4606f586-a84c-41f8-bdae-13bba1e15c46","Type":"ContainerStarted","Data":"d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5"} Feb 27 16:48:10 crc kubenswrapper[4751]: I0227 16:48:10.099737 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4606f586-a84c-41f8-bdae-13bba1e15c46" containerName="ceilometer-central-agent" containerID="cri-o://7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9" gracePeriod=30 Feb 27 16:48:10 crc kubenswrapper[4751]: I0227 16:48:10.100032 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 27 16:48:10 crc kubenswrapper[4751]: I0227 16:48:10.100107 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4606f586-a84c-41f8-bdae-13bba1e15c46" containerName="proxy-httpd" containerID="cri-o://d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5" gracePeriod=30 Feb 27 16:48:10 crc kubenswrapper[4751]: I0227 16:48:10.100181 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4606f586-a84c-41f8-bdae-13bba1e15c46" containerName="sg-core" containerID="cri-o://dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb" gracePeriod=30 Feb 27 16:48:10 crc kubenswrapper[4751]: I0227 16:48:10.100241 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4606f586-a84c-41f8-bdae-13bba1e15c46" 
containerName="ceilometer-notification-agent" containerID="cri-o://0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d" gracePeriod=30 Feb 27 16:48:10 crc kubenswrapper[4751]: I0227 16:48:10.105060 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536848-k5qt8" event={"ID":"9fae513e-32f8-4116-a05a-a4cbc62853ec","Type":"ContainerDied","Data":"699a425fe132a09d98777204c998c08c8e083a38cbfa181a911927db77077fc3"} Feb 27 16:48:10 crc kubenswrapper[4751]: I0227 16:48:10.105115 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="699a425fe132a09d98777204c998c08c8e083a38cbfa181a911927db77077fc3" Feb 27 16:48:10 crc kubenswrapper[4751]: I0227 16:48:10.105116 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536848-k5qt8" Feb 27 16:48:10 crc kubenswrapper[4751]: I0227 16:48:10.137545 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.027889955 podStartE2EDuration="14.137521876s" podCreationTimestamp="2026-02-27 16:47:56 +0000 UTC" firstStartedPulling="2026-02-27 16:47:58.094627755 +0000 UTC m=+1440.241642202" lastFinishedPulling="2026-02-27 16:48:09.204259666 +0000 UTC m=+1451.351274123" observedRunningTime="2026-02-27 16:48:10.12193975 +0000 UTC m=+1452.268954207" watchObservedRunningTime="2026-02-27 16:48:10.137521876 +0000 UTC m=+1452.284536333" Feb 27 16:48:10 crc kubenswrapper[4751]: I0227 16:48:10.162593 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.7684834 podStartE2EDuration="12.162569845s" podCreationTimestamp="2026-02-27 16:47:58 +0000 UTC" firstStartedPulling="2026-02-27 16:47:59.790851745 +0000 UTC m=+1441.937866192" lastFinishedPulling="2026-02-27 16:48:09.1849382 +0000 UTC m=+1451.331952637" observedRunningTime="2026-02-27 16:48:10.149125086 +0000 UTC m=+1452.296139533" watchObservedRunningTime="2026-02-27 16:48:10.162569845 +0000 UTC m=+1452.309584292" Feb 27 16:48:10 crc kubenswrapper[4751]: E0227 16:48:10.389929 4751 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4606f586_a84c_41f8_bdae_13bba1e15c46.slice/crio-d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4606f586_a84c_41f8_bdae_13bba1e15c46.slice/crio-conmon-d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5.scope\": RecentStats: unable to find data in memory cache]" Feb 27 16:48:10 crc kubenswrapper[4751]: I0227 16:48:10.545410 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536842-nq5mv"] Feb 27 16:48:10 crc kubenswrapper[4751]: I0227 16:48:10.560915 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536842-nq5mv"] Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.049994 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.148790 4751 generic.go:334] "Generic (PLEG): container finished" podID="4606f586-a84c-41f8-bdae-13bba1e15c46" containerID="d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5" exitCode=0 Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.148834 4751 generic.go:334] "Generic (PLEG): container finished" podID="4606f586-a84c-41f8-bdae-13bba1e15c46" containerID="dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb" exitCode=2 Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.148845 4751 generic.go:334] "Generic (PLEG): container finished" podID="4606f586-a84c-41f8-bdae-13bba1e15c46" containerID="0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d" exitCode=0 Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.148855 4751 generic.go:334] "Generic (PLEG): container finished" podID="4606f586-a84c-41f8-bdae-13bba1e15c46" containerID="7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9" exitCode=0 Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.149697 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.149795 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4606f586-a84c-41f8-bdae-13bba1e15c46","Type":"ContainerDied","Data":"d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5"} Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.149828 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4606f586-a84c-41f8-bdae-13bba1e15c46","Type":"ContainerDied","Data":"dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb"} Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.149839 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4606f586-a84c-41f8-bdae-13bba1e15c46","Type":"ContainerDied","Data":"0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d"} Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.149850 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4606f586-a84c-41f8-bdae-13bba1e15c46","Type":"ContainerDied","Data":"7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9"} Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.149859 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4606f586-a84c-41f8-bdae-13bba1e15c46","Type":"ContainerDied","Data":"25408a6116e6172509fa9aacb7476496bfa280645cb87d34d7c1afc2b4aa6676"} Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.149874 4751 scope.go:117] "RemoveContainer" containerID="d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.179948 4751 scope.go:117] "RemoveContainer" containerID="dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.198305 4751 scope.go:117] "RemoveContainer" containerID="0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.218593 4751 scope.go:117] "RemoveContainer" containerID="7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.234913 4751 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-scripts\") pod \"4606f586-a84c-41f8-bdae-13bba1e15c46\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.234977 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-config-data\") pod \"4606f586-a84c-41f8-bdae-13bba1e15c46\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.235041 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-sg-core-conf-yaml\") pod \"4606f586-a84c-41f8-bdae-13bba1e15c46\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.235123 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-combined-ca-bundle\") pod \"4606f586-a84c-41f8-bdae-13bba1e15c46\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.235177 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4606f586-a84c-41f8-bdae-13bba1e15c46-log-httpd\") pod \"4606f586-a84c-41f8-bdae-13bba1e15c46\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.235205 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4606f586-a84c-41f8-bdae-13bba1e15c46-run-httpd\") pod \"4606f586-a84c-41f8-bdae-13bba1e15c46\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.235235 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tg87x\" (UniqueName: \"kubernetes.io/projected/4606f586-a84c-41f8-bdae-13bba1e15c46-kube-api-access-tg87x\") pod \"4606f586-a84c-41f8-bdae-13bba1e15c46\" (UID: \"4606f586-a84c-41f8-bdae-13bba1e15c46\") " Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.237456 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4606f586-a84c-41f8-bdae-13bba1e15c46-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "4606f586-a84c-41f8-bdae-13bba1e15c46" (UID: "4606f586-a84c-41f8-bdae-13bba1e15c46"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.238655 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4606f586-a84c-41f8-bdae-13bba1e15c46-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "4606f586-a84c-41f8-bdae-13bba1e15c46" (UID: "4606f586-a84c-41f8-bdae-13bba1e15c46"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.241674 4751 scope.go:117] "RemoveContainer" containerID="d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.242682 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-scripts" (OuterVolumeSpecName: "scripts") pod "4606f586-a84c-41f8-bdae-13bba1e15c46" (UID: "4606f586-a84c-41f8-bdae-13bba1e15c46"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:48:11 crc kubenswrapper[4751]: E0227 16:48:11.242821 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5\": container with ID starting with d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5 not found: ID does not exist" containerID="d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.242861 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5"} err="failed to get container status \"d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5\": rpc error: code = NotFound desc = could not find container \"d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5\": container with ID starting with d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5 not found: ID does not exist" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.242901 4751 scope.go:117] "RemoveContainer" containerID="dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb" Feb 27 16:48:11 crc kubenswrapper[4751]: E0227 16:48:11.243554 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb\": container with ID starting with dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb not found: ID does not exist" containerID="dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.243621 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb"} err="failed to get container status \"dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb\": rpc error: code = NotFound desc = could not find container \"dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb\": container with ID starting with dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb not found: ID does not exist" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.243652 4751 scope.go:117] "RemoveContainer" containerID="0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d" Feb 27 16:48:11 crc kubenswrapper[4751]: E0227 16:48:11.245794 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d\": container with ID starting with 0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d not found: ID does not exist" 
containerID="0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.245825 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d"} err="failed to get container status \"0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d\": rpc error: code = NotFound desc = could not find container \"0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d\": container with ID starting with 0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d not found: ID does not exist" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.245844 4751 scope.go:117] "RemoveContainer" containerID="7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9" Feb 27 16:48:11 crc kubenswrapper[4751]: E0227 16:48:11.247020 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9\": container with ID starting with 7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9 not found: ID does not exist" containerID="7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.247042 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9"} err="failed to get container status \"7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9\": rpc error: code = NotFound desc = could not find container \"7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9\": container with ID starting with 7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9 not found: ID does not exist" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.247057 4751 scope.go:117] "RemoveContainer" containerID="d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.247513 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5"} err="failed to get container status \"d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5\": rpc error: code = NotFound desc = could not find container \"d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5\": container with ID starting with d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5 not found: ID does not exist" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.247533 4751 scope.go:117] "RemoveContainer" containerID="dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.247776 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb"} err="failed to get container status \"dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb\": rpc error: code = NotFound desc = could not find container \"dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb\": container with ID starting with dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb not found: ID does not exist" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.247792 4751 scope.go:117] "RemoveContainer" 
containerID="0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.248053 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d"} err="failed to get container status \"0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d\": rpc error: code = NotFound desc = could not find container \"0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d\": container with ID starting with 0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d not found: ID does not exist" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.248090 4751 scope.go:117] "RemoveContainer" containerID="7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.248342 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9"} err="failed to get container status \"7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9\": rpc error: code = NotFound desc = could not find container \"7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9\": container with ID starting with 7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9 not found: ID does not exist" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.248360 4751 scope.go:117] "RemoveContainer" containerID="d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.248571 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5"} err="failed to get container status \"d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5\": rpc error: code = NotFound desc = could not find container \"d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5\": container with ID starting with d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5 not found: ID does not exist" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.248588 4751 scope.go:117] "RemoveContainer" containerID="dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.248991 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb"} err="failed to get container status \"dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb\": rpc error: code = NotFound desc = could not find container \"dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb\": container with ID starting with dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb not found: ID does not exist" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.249013 4751 scope.go:117] "RemoveContainer" containerID="0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.249230 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d"} err="failed to get container status \"0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d\": rpc error: code = NotFound desc = could not find 
container \"0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d\": container with ID starting with 0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d not found: ID does not exist" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.249247 4751 scope.go:117] "RemoveContainer" containerID="7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.249471 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9"} err="failed to get container status \"7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9\": rpc error: code = NotFound desc = could not find container \"7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9\": container with ID starting with 7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9 not found: ID does not exist" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.249488 4751 scope.go:117] "RemoveContainer" containerID="d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.249719 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5"} err="failed to get container status \"d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5\": rpc error: code = NotFound desc = could not find container \"d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5\": container with ID starting with d54bd02a06b9e0307237312de84728f122600ea89af704f53c7083845a3b23f5 not found: ID does not exist" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.249734 4751 scope.go:117] "RemoveContainer" containerID="dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.249943 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb"} err="failed to get container status \"dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb\": rpc error: code = NotFound desc = could not find container \"dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb\": container with ID starting with dcb8d59d356b632ad0d7b78d8595280544efe5c4bb5d1b71e0c91e45f8a0d5bb not found: ID does not exist" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.249962 4751 scope.go:117] "RemoveContainer" containerID="0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.250192 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d"} err="failed to get container status \"0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d\": rpc error: code = NotFound desc = could not find container \"0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d\": container with ID starting with 0d4991488cb43a66309ec4da0390ed170cef5793efa5c4bdb824b3c264d62d5d not found: ID does not exist" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.250207 4751 scope.go:117] "RemoveContainer" containerID="7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.250433 4751 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9"} err="failed to get container status \"7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9\": rpc error: code = NotFound desc = could not find container \"7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9\": container with ID starting with 7c3cf0310ba04b50f63dda3ff86ed68eb6e0d78071881e54b20ae89705826bc9 not found: ID does not exist" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.250633 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4606f586-a84c-41f8-bdae-13bba1e15c46-kube-api-access-tg87x" (OuterVolumeSpecName: "kube-api-access-tg87x") pod "4606f586-a84c-41f8-bdae-13bba1e15c46" (UID: "4606f586-a84c-41f8-bdae-13bba1e15c46"). InnerVolumeSpecName "kube-api-access-tg87x". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.265925 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "4606f586-a84c-41f8-bdae-13bba1e15c46" (UID: "4606f586-a84c-41f8-bdae-13bba1e15c46"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.317158 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4606f586-a84c-41f8-bdae-13bba1e15c46" (UID: "4606f586-a84c-41f8-bdae-13bba1e15c46"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.328381 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-config-data" (OuterVolumeSpecName: "config-data") pod "4606f586-a84c-41f8-bdae-13bba1e15c46" (UID: "4606f586-a84c-41f8-bdae-13bba1e15c46"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.337710 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.337742 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.337753 4751 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.337763 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4606f586-a84c-41f8-bdae-13bba1e15c46-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.337773 4751 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4606f586-a84c-41f8-bdae-13bba1e15c46-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.337781 4751 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4606f586-a84c-41f8-bdae-13bba1e15c46-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.337792 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tg87x\" (UniqueName: \"kubernetes.io/projected/4606f586-a84c-41f8-bdae-13bba1e15c46-kube-api-access-tg87x\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.480633 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.493219 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.525559 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:48:11 crc kubenswrapper[4751]: E0227 16:48:11.525920 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4606f586-a84c-41f8-bdae-13bba1e15c46" containerName="proxy-httpd" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.525937 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="4606f586-a84c-41f8-bdae-13bba1e15c46" containerName="proxy-httpd" Feb 27 16:48:11 crc kubenswrapper[4751]: E0227 16:48:11.525946 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4606f586-a84c-41f8-bdae-13bba1e15c46" containerName="sg-core" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.525961 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="4606f586-a84c-41f8-bdae-13bba1e15c46" containerName="sg-core" Feb 27 16:48:11 crc kubenswrapper[4751]: E0227 16:48:11.525969 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4606f586-a84c-41f8-bdae-13bba1e15c46" containerName="ceilometer-notification-agent" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.525976 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="4606f586-a84c-41f8-bdae-13bba1e15c46" containerName="ceilometer-notification-agent" Feb 27 
16:48:11 crc kubenswrapper[4751]: E0227 16:48:11.525988 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4606f586-a84c-41f8-bdae-13bba1e15c46" containerName="ceilometer-central-agent" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.525994 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="4606f586-a84c-41f8-bdae-13bba1e15c46" containerName="ceilometer-central-agent" Feb 27 16:48:11 crc kubenswrapper[4751]: E0227 16:48:11.526008 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fae513e-32f8-4116-a05a-a4cbc62853ec" containerName="oc" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.526014 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fae513e-32f8-4116-a05a-a4cbc62853ec" containerName="oc" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.526175 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="4606f586-a84c-41f8-bdae-13bba1e15c46" containerName="sg-core" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.526189 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="4606f586-a84c-41f8-bdae-13bba1e15c46" containerName="ceilometer-notification-agent" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.526196 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="4606f586-a84c-41f8-bdae-13bba1e15c46" containerName="ceilometer-central-agent" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.526205 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="9fae513e-32f8-4116-a05a-a4cbc62853ec" containerName="oc" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.526211 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="4606f586-a84c-41f8-bdae-13bba1e15c46" containerName="proxy-httpd" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.527695 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.540374 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.540961 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.541316 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.643833 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.644004 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6fe3337-3152-403c-946a-49fb365c99c5-run-httpd\") pod \"ceilometer-0\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.644047 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.644200 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-882pc\" (UniqueName: \"kubernetes.io/projected/e6fe3337-3152-403c-946a-49fb365c99c5-kube-api-access-882pc\") pod \"ceilometer-0\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.644374 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-config-data\") pod \"ceilometer-0\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.644462 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-scripts\") pod \"ceilometer-0\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.644680 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6fe3337-3152-403c-946a-49fb365c99c5-log-httpd\") pod \"ceilometer-0\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.746130 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6fe3337-3152-403c-946a-49fb365c99c5-log-httpd\") pod \"ceilometer-0\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.746522 4751 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.746672 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6fe3337-3152-403c-946a-49fb365c99c5-run-httpd\") pod \"ceilometer-0\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.746825 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.747011 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-882pc\" (UniqueName: \"kubernetes.io/projected/e6fe3337-3152-403c-946a-49fb365c99c5-kube-api-access-882pc\") pod \"ceilometer-0\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.746557 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6fe3337-3152-403c-946a-49fb365c99c5-log-httpd\") pod \"ceilometer-0\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.747087 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6fe3337-3152-403c-946a-49fb365c99c5-run-httpd\") pod \"ceilometer-0\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.747210 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-config-data\") pod \"ceilometer-0\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.747622 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-scripts\") pod \"ceilometer-0\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.749920 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.750055 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-config-data\") pod \"ceilometer-0\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.751539 4751 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.753263 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-scripts\") pod \"ceilometer-0\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.783045 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-882pc\" (UniqueName: \"kubernetes.io/projected/e6fe3337-3152-403c-946a-49fb365c99c5-kube-api-access-882pc\") pod \"ceilometer-0\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " pod="openstack/ceilometer-0" Feb 27 16:48:11 crc kubenswrapper[4751]: I0227 16:48:11.845037 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:48:12 crc kubenswrapper[4751]: I0227 16:48:12.288925 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:48:12 crc kubenswrapper[4751]: I0227 16:48:12.529982 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4606f586-a84c-41f8-bdae-13bba1e15c46" path="/var/lib/kubelet/pods/4606f586-a84c-41f8-bdae-13bba1e15c46/volumes" Feb 27 16:48:12 crc kubenswrapper[4751]: I0227 16:48:12.531090 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d07cffe8-018d-43c5-80bc-c729dca39251" path="/var/lib/kubelet/pods/d07cffe8-018d-43c5-80bc-c729dca39251/volumes" Feb 27 16:48:13 crc kubenswrapper[4751]: I0227 16:48:13.178065 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6fe3337-3152-403c-946a-49fb365c99c5","Type":"ContainerStarted","Data":"38f9ad27baed35d56d749ed6e9803d5bf8fa23f102016cb9ae6cda807bd2efbe"} Feb 27 16:48:13 crc kubenswrapper[4751]: I0227 16:48:13.178381 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6fe3337-3152-403c-946a-49fb365c99c5","Type":"ContainerStarted","Data":"cfa8040f356dc7d0b68feb3d9fd946652ead9df27a3c7bb6755186b9fcd03032"} Feb 27 16:48:13 crc kubenswrapper[4751]: I0227 16:48:13.329229 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:48:15 crc kubenswrapper[4751]: I0227 16:48:15.197775 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6fe3337-3152-403c-946a-49fb365c99c5","Type":"ContainerStarted","Data":"39809915829e48c9315635956936ee8185eb8d13ee5704a93b241618189d1658"} Feb 27 16:48:16 crc kubenswrapper[4751]: I0227 16:48:16.208245 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6fe3337-3152-403c-946a-49fb365c99c5","Type":"ContainerStarted","Data":"ece3b53156d4bd583adf821f4cfa7a369365f855ee5afcc774722af014f079ba"} Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.042185 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-j2chq"] Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.044809 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-j2chq" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.050789 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-j2chq"] Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.166463 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctgx7\" (UniqueName: \"kubernetes.io/projected/d074bd48-85b6-4bcf-ad23-bb541f92984d-kube-api-access-ctgx7\") pod \"nova-api-db-create-j2chq\" (UID: \"d074bd48-85b6-4bcf-ad23-bb541f92984d\") " pod="openstack/nova-api-db-create-j2chq" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.166790 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d074bd48-85b6-4bcf-ad23-bb541f92984d-operator-scripts\") pod \"nova-api-db-create-j2chq\" (UID: \"d074bd48-85b6-4bcf-ad23-bb541f92984d\") " pod="openstack/nova-api-db-create-j2chq" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.230499 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6fe3337-3152-403c-946a-49fb365c99c5","Type":"ContainerStarted","Data":"e168ef42de8017878320ddf0c8de784688d870949ae6fa383c773c43becca118"} Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.230648 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e6fe3337-3152-403c-946a-49fb365c99c5" containerName="ceilometer-central-agent" containerID="cri-o://38f9ad27baed35d56d749ed6e9803d5bf8fa23f102016cb9ae6cda807bd2efbe" gracePeriod=30 Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.230703 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e6fe3337-3152-403c-946a-49fb365c99c5" containerName="sg-core" containerID="cri-o://ece3b53156d4bd583adf821f4cfa7a369365f855ee5afcc774722af014f079ba" gracePeriod=30 Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.230699 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e6fe3337-3152-403c-946a-49fb365c99c5" containerName="proxy-httpd" containerID="cri-o://e168ef42de8017878320ddf0c8de784688d870949ae6fa383c773c43becca118" gracePeriod=30 Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.230796 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e6fe3337-3152-403c-946a-49fb365c99c5" containerName="ceilometer-notification-agent" containerID="cri-o://39809915829e48c9315635956936ee8185eb8d13ee5704a93b241618189d1658" gracePeriod=30 Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.230936 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.242902 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-pnm6t"] Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.244285 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-pnm6t" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.257303 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-c1fb-account-create-update-7gk8w"] Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.259111 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-c1fb-account-create-update-7gk8w" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.260834 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.268645 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctgx7\" (UniqueName: \"kubernetes.io/projected/d074bd48-85b6-4bcf-ad23-bb541f92984d-kube-api-access-ctgx7\") pod \"nova-api-db-create-j2chq\" (UID: \"d074bd48-85b6-4bcf-ad23-bb541f92984d\") " pod="openstack/nova-api-db-create-j2chq" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.268783 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d074bd48-85b6-4bcf-ad23-bb541f92984d-operator-scripts\") pod \"nova-api-db-create-j2chq\" (UID: \"d074bd48-85b6-4bcf-ad23-bb541f92984d\") " pod="openstack/nova-api-db-create-j2chq" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.269152 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-pnm6t"] Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.269408 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d074bd48-85b6-4bcf-ad23-bb541f92984d-operator-scripts\") pod \"nova-api-db-create-j2chq\" (UID: \"d074bd48-85b6-4bcf-ad23-bb541f92984d\") " pod="openstack/nova-api-db-create-j2chq" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.276166 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-c1fb-account-create-update-7gk8w"] Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.276694 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.465651549 podStartE2EDuration="7.276679736s" podCreationTimestamp="2026-02-27 16:48:11 +0000 UTC" firstStartedPulling="2026-02-27 16:48:12.306135916 +0000 UTC m=+1454.453150363" lastFinishedPulling="2026-02-27 16:48:17.117164073 +0000 UTC m=+1459.264178550" observedRunningTime="2026-02-27 16:48:18.264989374 +0000 UTC m=+1460.412003831" watchObservedRunningTime="2026-02-27 16:48:18.276679736 +0000 UTC m=+1460.423694173" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.292718 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctgx7\" (UniqueName: \"kubernetes.io/projected/d074bd48-85b6-4bcf-ad23-bb541f92984d-kube-api-access-ctgx7\") pod \"nova-api-db-create-j2chq\" (UID: \"d074bd48-85b6-4bcf-ad23-bb541f92984d\") " pod="openstack/nova-api-db-create-j2chq" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.363628 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-j2chq" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.370486 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkrgn\" (UniqueName: \"kubernetes.io/projected/bd714706-b63d-4d97-b9df-8ac662e9dfb0-kube-api-access-hkrgn\") pod \"nova-api-c1fb-account-create-update-7gk8w\" (UID: \"bd714706-b63d-4d97-b9df-8ac662e9dfb0\") " pod="openstack/nova-api-c1fb-account-create-update-7gk8w" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.370519 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b05cb31e-70e5-4e64-984a-6fa8053743de-operator-scripts\") pod \"nova-cell0-db-create-pnm6t\" (UID: \"b05cb31e-70e5-4e64-984a-6fa8053743de\") " pod="openstack/nova-cell0-db-create-pnm6t" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.370555 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd714706-b63d-4d97-b9df-8ac662e9dfb0-operator-scripts\") pod \"nova-api-c1fb-account-create-update-7gk8w\" (UID: \"bd714706-b63d-4d97-b9df-8ac662e9dfb0\") " pod="openstack/nova-api-c1fb-account-create-update-7gk8w" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.370625 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jthn\" (UniqueName: \"kubernetes.io/projected/b05cb31e-70e5-4e64-984a-6fa8053743de-kube-api-access-2jthn\") pod \"nova-cell0-db-create-pnm6t\" (UID: \"b05cb31e-70e5-4e64-984a-6fa8053743de\") " pod="openstack/nova-cell0-db-create-pnm6t" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.438752 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-h4kdv"] Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.440013 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-h4kdv" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.450768 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-e4ff-account-create-update-5zkr9"] Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.452130 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-e4ff-account-create-update-5zkr9" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.457281 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.460920 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-h4kdv"] Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.469510 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-e4ff-account-create-update-5zkr9"] Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.477497 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkrgn\" (UniqueName: \"kubernetes.io/projected/bd714706-b63d-4d97-b9df-8ac662e9dfb0-kube-api-access-hkrgn\") pod \"nova-api-c1fb-account-create-update-7gk8w\" (UID: \"bd714706-b63d-4d97-b9df-8ac662e9dfb0\") " pod="openstack/nova-api-c1fb-account-create-update-7gk8w" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.477541 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b05cb31e-70e5-4e64-984a-6fa8053743de-operator-scripts\") pod \"nova-cell0-db-create-pnm6t\" (UID: \"b05cb31e-70e5-4e64-984a-6fa8053743de\") " pod="openstack/nova-cell0-db-create-pnm6t" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.477587 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd714706-b63d-4d97-b9df-8ac662e9dfb0-operator-scripts\") pod \"nova-api-c1fb-account-create-update-7gk8w\" (UID: \"bd714706-b63d-4d97-b9df-8ac662e9dfb0\") " pod="openstack/nova-api-c1fb-account-create-update-7gk8w" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.477668 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jthn\" (UniqueName: \"kubernetes.io/projected/b05cb31e-70e5-4e64-984a-6fa8053743de-kube-api-access-2jthn\") pod \"nova-cell0-db-create-pnm6t\" (UID: \"b05cb31e-70e5-4e64-984a-6fa8053743de\") " pod="openstack/nova-cell0-db-create-pnm6t" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.478584 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b05cb31e-70e5-4e64-984a-6fa8053743de-operator-scripts\") pod \"nova-cell0-db-create-pnm6t\" (UID: \"b05cb31e-70e5-4e64-984a-6fa8053743de\") " pod="openstack/nova-cell0-db-create-pnm6t" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.478599 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd714706-b63d-4d97-b9df-8ac662e9dfb0-operator-scripts\") pod \"nova-api-c1fb-account-create-update-7gk8w\" (UID: \"bd714706-b63d-4d97-b9df-8ac662e9dfb0\") " pod="openstack/nova-api-c1fb-account-create-update-7gk8w" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.502358 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jthn\" (UniqueName: \"kubernetes.io/projected/b05cb31e-70e5-4e64-984a-6fa8053743de-kube-api-access-2jthn\") pod \"nova-cell0-db-create-pnm6t\" (UID: \"b05cb31e-70e5-4e64-984a-6fa8053743de\") " pod="openstack/nova-cell0-db-create-pnm6t" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.502385 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-hkrgn\" (UniqueName: \"kubernetes.io/projected/bd714706-b63d-4d97-b9df-8ac662e9dfb0-kube-api-access-hkrgn\") pod \"nova-api-c1fb-account-create-update-7gk8w\" (UID: \"bd714706-b63d-4d97-b9df-8ac662e9dfb0\") " pod="openstack/nova-api-c1fb-account-create-update-7gk8w" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.576883 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-pnm6t" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.578982 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfvsq\" (UniqueName: \"kubernetes.io/projected/e2c399bf-5a40-4e29-9056-60b030211a97-kube-api-access-cfvsq\") pod \"nova-cell0-e4ff-account-create-update-5zkr9\" (UID: \"e2c399bf-5a40-4e29-9056-60b030211a97\") " pod="openstack/nova-cell0-e4ff-account-create-update-5zkr9" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.579136 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8548f461-a34e-4c42-9d42-ee0a8b0bb7c7-operator-scripts\") pod \"nova-cell1-db-create-h4kdv\" (UID: \"8548f461-a34e-4c42-9d42-ee0a8b0bb7c7\") " pod="openstack/nova-cell1-db-create-h4kdv" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.579185 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2c399bf-5a40-4e29-9056-60b030211a97-operator-scripts\") pod \"nova-cell0-e4ff-account-create-update-5zkr9\" (UID: \"e2c399bf-5a40-4e29-9056-60b030211a97\") " pod="openstack/nova-cell0-e4ff-account-create-update-5zkr9" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.579208 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmj59\" (UniqueName: \"kubernetes.io/projected/8548f461-a34e-4c42-9d42-ee0a8b0bb7c7-kube-api-access-cmj59\") pod \"nova-cell1-db-create-h4kdv\" (UID: \"8548f461-a34e-4c42-9d42-ee0a8b0bb7c7\") " pod="openstack/nova-cell1-db-create-h4kdv" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.586197 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-c1fb-account-create-update-7gk8w" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.654549 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-d540-account-create-update-s467z"] Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.655681 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-d540-account-create-update-s467z" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.660840 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.672882 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-d540-account-create-update-s467z"] Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.681092 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2c399bf-5a40-4e29-9056-60b030211a97-operator-scripts\") pod \"nova-cell0-e4ff-account-create-update-5zkr9\" (UID: \"e2c399bf-5a40-4e29-9056-60b030211a97\") " pod="openstack/nova-cell0-e4ff-account-create-update-5zkr9" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.681149 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmj59\" (UniqueName: \"kubernetes.io/projected/8548f461-a34e-4c42-9d42-ee0a8b0bb7c7-kube-api-access-cmj59\") pod \"nova-cell1-db-create-h4kdv\" (UID: \"8548f461-a34e-4c42-9d42-ee0a8b0bb7c7\") " pod="openstack/nova-cell1-db-create-h4kdv" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.681202 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfvsq\" (UniqueName: \"kubernetes.io/projected/e2c399bf-5a40-4e29-9056-60b030211a97-kube-api-access-cfvsq\") pod \"nova-cell0-e4ff-account-create-update-5zkr9\" (UID: \"e2c399bf-5a40-4e29-9056-60b030211a97\") " pod="openstack/nova-cell0-e4ff-account-create-update-5zkr9" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.681337 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8548f461-a34e-4c42-9d42-ee0a8b0bb7c7-operator-scripts\") pod \"nova-cell1-db-create-h4kdv\" (UID: \"8548f461-a34e-4c42-9d42-ee0a8b0bb7c7\") " pod="openstack/nova-cell1-db-create-h4kdv" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.684254 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8548f461-a34e-4c42-9d42-ee0a8b0bb7c7-operator-scripts\") pod \"nova-cell1-db-create-h4kdv\" (UID: \"8548f461-a34e-4c42-9d42-ee0a8b0bb7c7\") " pod="openstack/nova-cell1-db-create-h4kdv" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.685322 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2c399bf-5a40-4e29-9056-60b030211a97-operator-scripts\") pod \"nova-cell0-e4ff-account-create-update-5zkr9\" (UID: \"e2c399bf-5a40-4e29-9056-60b030211a97\") " pod="openstack/nova-cell0-e4ff-account-create-update-5zkr9" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.713993 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmj59\" (UniqueName: \"kubernetes.io/projected/8548f461-a34e-4c42-9d42-ee0a8b0bb7c7-kube-api-access-cmj59\") pod \"nova-cell1-db-create-h4kdv\" (UID: \"8548f461-a34e-4c42-9d42-ee0a8b0bb7c7\") " pod="openstack/nova-cell1-db-create-h4kdv" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.715964 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfvsq\" (UniqueName: \"kubernetes.io/projected/e2c399bf-5a40-4e29-9056-60b030211a97-kube-api-access-cfvsq\") pod 
\"nova-cell0-e4ff-account-create-update-5zkr9\" (UID: \"e2c399bf-5a40-4e29-9056-60b030211a97\") " pod="openstack/nova-cell0-e4ff-account-create-update-5zkr9" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.785535 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vpv6x\" (UniqueName: \"kubernetes.io/projected/292dc6c8-2b07-4546-acc2-8cc465c17d4f-kube-api-access-vpv6x\") pod \"nova-cell1-d540-account-create-update-s467z\" (UID: \"292dc6c8-2b07-4546-acc2-8cc465c17d4f\") " pod="openstack/nova-cell1-d540-account-create-update-s467z" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.785631 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/292dc6c8-2b07-4546-acc2-8cc465c17d4f-operator-scripts\") pod \"nova-cell1-d540-account-create-update-s467z\" (UID: \"292dc6c8-2b07-4546-acc2-8cc465c17d4f\") " pod="openstack/nova-cell1-d540-account-create-update-s467z" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.880807 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-h4kdv" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.887575 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/292dc6c8-2b07-4546-acc2-8cc465c17d4f-operator-scripts\") pod \"nova-cell1-d540-account-create-update-s467z\" (UID: \"292dc6c8-2b07-4546-acc2-8cc465c17d4f\") " pod="openstack/nova-cell1-d540-account-create-update-s467z" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.887706 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vpv6x\" (UniqueName: \"kubernetes.io/projected/292dc6c8-2b07-4546-acc2-8cc465c17d4f-kube-api-access-vpv6x\") pod \"nova-cell1-d540-account-create-update-s467z\" (UID: \"292dc6c8-2b07-4546-acc2-8cc465c17d4f\") " pod="openstack/nova-cell1-d540-account-create-update-s467z" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.890077 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/292dc6c8-2b07-4546-acc2-8cc465c17d4f-operator-scripts\") pod \"nova-cell1-d540-account-create-update-s467z\" (UID: \"292dc6c8-2b07-4546-acc2-8cc465c17d4f\") " pod="openstack/nova-cell1-d540-account-create-update-s467z" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.901236 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-e4ff-account-create-update-5zkr9" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.927067 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vpv6x\" (UniqueName: \"kubernetes.io/projected/292dc6c8-2b07-4546-acc2-8cc465c17d4f-kube-api-access-vpv6x\") pod \"nova-cell1-d540-account-create-update-s467z\" (UID: \"292dc6c8-2b07-4546-acc2-8cc465c17d4f\") " pod="openstack/nova-cell1-d540-account-create-update-s467z" Feb 27 16:48:18 crc kubenswrapper[4751]: I0227 16:48:18.948721 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-j2chq"] Feb 27 16:48:19 crc kubenswrapper[4751]: I0227 16:48:19.000263 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-d540-account-create-update-s467z" Feb 27 16:48:19 crc kubenswrapper[4751]: I0227 16:48:19.261224 4751 generic.go:334] "Generic (PLEG): container finished" podID="e6fe3337-3152-403c-946a-49fb365c99c5" containerID="e168ef42de8017878320ddf0c8de784688d870949ae6fa383c773c43becca118" exitCode=0 Feb 27 16:48:19 crc kubenswrapper[4751]: I0227 16:48:19.261546 4751 generic.go:334] "Generic (PLEG): container finished" podID="e6fe3337-3152-403c-946a-49fb365c99c5" containerID="ece3b53156d4bd583adf821f4cfa7a369365f855ee5afcc774722af014f079ba" exitCode=2 Feb 27 16:48:19 crc kubenswrapper[4751]: I0227 16:48:19.261557 4751 generic.go:334] "Generic (PLEG): container finished" podID="e6fe3337-3152-403c-946a-49fb365c99c5" containerID="39809915829e48c9315635956936ee8185eb8d13ee5704a93b241618189d1658" exitCode=0 Feb 27 16:48:19 crc kubenswrapper[4751]: I0227 16:48:19.261602 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6fe3337-3152-403c-946a-49fb365c99c5","Type":"ContainerDied","Data":"e168ef42de8017878320ddf0c8de784688d870949ae6fa383c773c43becca118"} Feb 27 16:48:19 crc kubenswrapper[4751]: I0227 16:48:19.261632 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6fe3337-3152-403c-946a-49fb365c99c5","Type":"ContainerDied","Data":"ece3b53156d4bd583adf821f4cfa7a369365f855ee5afcc774722af014f079ba"} Feb 27 16:48:19 crc kubenswrapper[4751]: I0227 16:48:19.261641 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6fe3337-3152-403c-946a-49fb365c99c5","Type":"ContainerDied","Data":"39809915829e48c9315635956936ee8185eb8d13ee5704a93b241618189d1658"} Feb 27 16:48:19 crc kubenswrapper[4751]: I0227 16:48:19.270844 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-j2chq" event={"ID":"d074bd48-85b6-4bcf-ad23-bb541f92984d","Type":"ContainerStarted","Data":"34169689f3c52bf3227ed948d0f7bffbaee547666c297ca3f93c8862f93ae755"} Feb 27 16:48:19 crc kubenswrapper[4751]: I0227 16:48:19.270887 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-j2chq" event={"ID":"d074bd48-85b6-4bcf-ad23-bb541f92984d","Type":"ContainerStarted","Data":"40f071ddc4d7a7cfd2f0dbc232db594c90c7ccf1e02fd2344e98426a7953566a"} Feb 27 16:48:19 crc kubenswrapper[4751]: I0227 16:48:19.281132 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-c1fb-account-create-update-7gk8w"] Feb 27 16:48:19 crc kubenswrapper[4751]: I0227 16:48:19.293079 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-j2chq" podStartSLOduration=1.293061437 podStartE2EDuration="1.293061437s" podCreationTimestamp="2026-02-27 16:48:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:48:19.288792313 +0000 UTC m=+1461.435806760" watchObservedRunningTime="2026-02-27 16:48:19.293061437 +0000 UTC m=+1461.440075884" Feb 27 16:48:19 crc kubenswrapper[4751]: I0227 16:48:19.420576 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-pnm6t"] Feb 27 16:48:19 crc kubenswrapper[4751]: W0227 16:48:19.438956 4751 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb05cb31e_70e5_4e64_984a_6fa8053743de.slice/crio-c06bd8d9af928aecdf7b351028bce09385fcc89c2f11b36d9a13e4bfbd97ba06 WatchSource:0}: Error finding container c06bd8d9af928aecdf7b351028bce09385fcc89c2f11b36d9a13e4bfbd97ba06: Status 404 returned error can't find the container with id c06bd8d9af928aecdf7b351028bce09385fcc89c2f11b36d9a13e4bfbd97ba06 Feb 27 16:48:19 crc kubenswrapper[4751]: I0227 16:48:19.513606 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-e4ff-account-create-update-5zkr9"] Feb 27 16:48:19 crc kubenswrapper[4751]: I0227 16:48:19.523334 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-h4kdv"] Feb 27 16:48:19 crc kubenswrapper[4751]: I0227 16:48:19.681674 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-d540-account-create-update-s467z"] Feb 27 16:48:20 crc kubenswrapper[4751]: I0227 16:48:20.279822 4751 generic.go:334] "Generic (PLEG): container finished" podID="8548f461-a34e-4c42-9d42-ee0a8b0bb7c7" containerID="829485fb76c54a23c8e94741447a2ee195eb2dfd182e82c5103b20b0672bbd8a" exitCode=0 Feb 27 16:48:20 crc kubenswrapper[4751]: I0227 16:48:20.279875 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-h4kdv" event={"ID":"8548f461-a34e-4c42-9d42-ee0a8b0bb7c7","Type":"ContainerDied","Data":"829485fb76c54a23c8e94741447a2ee195eb2dfd182e82c5103b20b0672bbd8a"} Feb 27 16:48:20 crc kubenswrapper[4751]: I0227 16:48:20.279939 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-h4kdv" event={"ID":"8548f461-a34e-4c42-9d42-ee0a8b0bb7c7","Type":"ContainerStarted","Data":"274f9b7571960886ac641796e5d44b18418e2739fadce29c56fb3c362cebb528"} Feb 27 16:48:20 crc kubenswrapper[4751]: I0227 16:48:20.281385 4751 generic.go:334] "Generic (PLEG): container finished" podID="b05cb31e-70e5-4e64-984a-6fa8053743de" containerID="9545fcf071b000f82de0b521c0d81780bb6933081b5ec65ff1aaedbe49c02474" exitCode=0 Feb 27 16:48:20 crc kubenswrapper[4751]: I0227 16:48:20.281427 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-pnm6t" event={"ID":"b05cb31e-70e5-4e64-984a-6fa8053743de","Type":"ContainerDied","Data":"9545fcf071b000f82de0b521c0d81780bb6933081b5ec65ff1aaedbe49c02474"} Feb 27 16:48:20 crc kubenswrapper[4751]: I0227 16:48:20.281464 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-pnm6t" event={"ID":"b05cb31e-70e5-4e64-984a-6fa8053743de","Type":"ContainerStarted","Data":"c06bd8d9af928aecdf7b351028bce09385fcc89c2f11b36d9a13e4bfbd97ba06"} Feb 27 16:48:20 crc kubenswrapper[4751]: I0227 16:48:20.282971 4751 generic.go:334] "Generic (PLEG): container finished" podID="d074bd48-85b6-4bcf-ad23-bb541f92984d" containerID="34169689f3c52bf3227ed948d0f7bffbaee547666c297ca3f93c8862f93ae755" exitCode=0 Feb 27 16:48:20 crc kubenswrapper[4751]: I0227 16:48:20.283040 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-j2chq" event={"ID":"d074bd48-85b6-4bcf-ad23-bb541f92984d","Type":"ContainerDied","Data":"34169689f3c52bf3227ed948d0f7bffbaee547666c297ca3f93c8862f93ae755"} Feb 27 16:48:20 crc kubenswrapper[4751]: I0227 16:48:20.284641 4751 generic.go:334] "Generic (PLEG): container finished" podID="bd714706-b63d-4d97-b9df-8ac662e9dfb0" containerID="bd8d3711fc5befd9ca00621b8c304a63586b9645182862b07589ddabffd6d563" exitCode=0 Feb 27 16:48:20 crc kubenswrapper[4751]: 
I0227 16:48:20.284707 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c1fb-account-create-update-7gk8w" event={"ID":"bd714706-b63d-4d97-b9df-8ac662e9dfb0","Type":"ContainerDied","Data":"bd8d3711fc5befd9ca00621b8c304a63586b9645182862b07589ddabffd6d563"} Feb 27 16:48:20 crc kubenswrapper[4751]: I0227 16:48:20.284726 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c1fb-account-create-update-7gk8w" event={"ID":"bd714706-b63d-4d97-b9df-8ac662e9dfb0","Type":"ContainerStarted","Data":"bebd445bdeb6d4f155aa67898eaa010c4dadd3cfc1e7f13f5722ed8996327f84"} Feb 27 16:48:20 crc kubenswrapper[4751]: I0227 16:48:20.286109 4751 generic.go:334] "Generic (PLEG): container finished" podID="292dc6c8-2b07-4546-acc2-8cc465c17d4f" containerID="9efb72168b1de3e953c265e1a1a6f4153efc64fb95ad6ba03badc8c64cc24224" exitCode=0 Feb 27 16:48:20 crc kubenswrapper[4751]: I0227 16:48:20.286158 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-d540-account-create-update-s467z" event={"ID":"292dc6c8-2b07-4546-acc2-8cc465c17d4f","Type":"ContainerDied","Data":"9efb72168b1de3e953c265e1a1a6f4153efc64fb95ad6ba03badc8c64cc24224"} Feb 27 16:48:20 crc kubenswrapper[4751]: I0227 16:48:20.286179 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-d540-account-create-update-s467z" event={"ID":"292dc6c8-2b07-4546-acc2-8cc465c17d4f","Type":"ContainerStarted","Data":"d689b3f30e7207c9229f102d8cf9fa593fd72804367a7a0d332718c2f9fe1586"} Feb 27 16:48:20 crc kubenswrapper[4751]: I0227 16:48:20.287681 4751 generic.go:334] "Generic (PLEG): container finished" podID="e2c399bf-5a40-4e29-9056-60b030211a97" containerID="ca497ae2e1618601a7349af12c9fe9d6b80038bec3b56034e3639dec42ca9521" exitCode=0 Feb 27 16:48:20 crc kubenswrapper[4751]: I0227 16:48:20.287725 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-e4ff-account-create-update-5zkr9" event={"ID":"e2c399bf-5a40-4e29-9056-60b030211a97","Type":"ContainerDied","Data":"ca497ae2e1618601a7349af12c9fe9d6b80038bec3b56034e3639dec42ca9521"} Feb 27 16:48:20 crc kubenswrapper[4751]: I0227 16:48:20.287749 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-e4ff-account-create-update-5zkr9" event={"ID":"e2c399bf-5a40-4e29-9056-60b030211a97","Type":"ContainerStarted","Data":"21f7f0a24e795523c830c5697a4e886da369e394049de98345a2df0738b1e335"} Feb 27 16:48:21 crc kubenswrapper[4751]: I0227 16:48:21.327575 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 27 16:48:21 crc kubenswrapper[4751]: I0227 16:48:21.327853 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="efb8a6cc-5def-4be4-82e1-b20f19d1c800" containerName="glance-log" containerID="cri-o://9deea4b5ebc9b2d7bf3b3d4ed606e017f437870861c43f0542112993c9aa7015" gracePeriod=30 Feb 27 16:48:21 crc kubenswrapper[4751]: I0227 16:48:21.327998 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="efb8a6cc-5def-4be4-82e1-b20f19d1c800" containerName="glance-httpd" containerID="cri-o://714aa704291a6040e48f2ee626814b51c2a58b2bf0d958cef605f1059bbfaf53" gracePeriod=30 Feb 27 16:48:21 crc kubenswrapper[4751]: I0227 16:48:21.759347 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-e4ff-account-create-update-5zkr9" Feb 27 16:48:21 crc kubenswrapper[4751]: I0227 16:48:21.847917 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2c399bf-5a40-4e29-9056-60b030211a97-operator-scripts\") pod \"e2c399bf-5a40-4e29-9056-60b030211a97\" (UID: \"e2c399bf-5a40-4e29-9056-60b030211a97\") " Feb 27 16:48:21 crc kubenswrapper[4751]: I0227 16:48:21.848050 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfvsq\" (UniqueName: \"kubernetes.io/projected/e2c399bf-5a40-4e29-9056-60b030211a97-kube-api-access-cfvsq\") pod \"e2c399bf-5a40-4e29-9056-60b030211a97\" (UID: \"e2c399bf-5a40-4e29-9056-60b030211a97\") " Feb 27 16:48:21 crc kubenswrapper[4751]: I0227 16:48:21.850051 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2c399bf-5a40-4e29-9056-60b030211a97-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e2c399bf-5a40-4e29-9056-60b030211a97" (UID: "e2c399bf-5a40-4e29-9056-60b030211a97"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:48:21 crc kubenswrapper[4751]: I0227 16:48:21.855724 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2c399bf-5a40-4e29-9056-60b030211a97-kube-api-access-cfvsq" (OuterVolumeSpecName: "kube-api-access-cfvsq") pod "e2c399bf-5a40-4e29-9056-60b030211a97" (UID: "e2c399bf-5a40-4e29-9056-60b030211a97"). InnerVolumeSpecName "kube-api-access-cfvsq". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:48:21 crc kubenswrapper[4751]: I0227 16:48:21.956486 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfvsq\" (UniqueName: \"kubernetes.io/projected/e2c399bf-5a40-4e29-9056-60b030211a97-kube-api-access-cfvsq\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:21 crc kubenswrapper[4751]: I0227 16:48:21.956522 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2c399bf-5a40-4e29-9056-60b030211a97-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.000128 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-d540-account-create-update-s467z" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.009840 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-h4kdv" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.021653 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-j2chq" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.045656 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-pnm6t" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.058067 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d074bd48-85b6-4bcf-ad23-bb541f92984d-operator-scripts\") pod \"d074bd48-85b6-4bcf-ad23-bb541f92984d\" (UID: \"d074bd48-85b6-4bcf-ad23-bb541f92984d\") " Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.058118 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/292dc6c8-2b07-4546-acc2-8cc465c17d4f-operator-scripts\") pod \"292dc6c8-2b07-4546-acc2-8cc465c17d4f\" (UID: \"292dc6c8-2b07-4546-acc2-8cc465c17d4f\") " Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.058144 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8548f461-a34e-4c42-9d42-ee0a8b0bb7c7-operator-scripts\") pod \"8548f461-a34e-4c42-9d42-ee0a8b0bb7c7\" (UID: \"8548f461-a34e-4c42-9d42-ee0a8b0bb7c7\") " Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.058179 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cmj59\" (UniqueName: \"kubernetes.io/projected/8548f461-a34e-4c42-9d42-ee0a8b0bb7c7-kube-api-access-cmj59\") pod \"8548f461-a34e-4c42-9d42-ee0a8b0bb7c7\" (UID: \"8548f461-a34e-4c42-9d42-ee0a8b0bb7c7\") " Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.058281 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vpv6x\" (UniqueName: \"kubernetes.io/projected/292dc6c8-2b07-4546-acc2-8cc465c17d4f-kube-api-access-vpv6x\") pod \"292dc6c8-2b07-4546-acc2-8cc465c17d4f\" (UID: \"292dc6c8-2b07-4546-acc2-8cc465c17d4f\") " Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.058349 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ctgx7\" (UniqueName: \"kubernetes.io/projected/d074bd48-85b6-4bcf-ad23-bb541f92984d-kube-api-access-ctgx7\") pod \"d074bd48-85b6-4bcf-ad23-bb541f92984d\" (UID: \"d074bd48-85b6-4bcf-ad23-bb541f92984d\") " Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.059178 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8548f461-a34e-4c42-9d42-ee0a8b0bb7c7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8548f461-a34e-4c42-9d42-ee0a8b0bb7c7" (UID: "8548f461-a34e-4c42-9d42-ee0a8b0bb7c7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.059445 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d074bd48-85b6-4bcf-ad23-bb541f92984d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d074bd48-85b6-4bcf-ad23-bb541f92984d" (UID: "d074bd48-85b6-4bcf-ad23-bb541f92984d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.059842 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/292dc6c8-2b07-4546-acc2-8cc465c17d4f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "292dc6c8-2b07-4546-acc2-8cc465c17d4f" (UID: "292dc6c8-2b07-4546-acc2-8cc465c17d4f"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.076568 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8548f461-a34e-4c42-9d42-ee0a8b0bb7c7-kube-api-access-cmj59" (OuterVolumeSpecName: "kube-api-access-cmj59") pod "8548f461-a34e-4c42-9d42-ee0a8b0bb7c7" (UID: "8548f461-a34e-4c42-9d42-ee0a8b0bb7c7"). InnerVolumeSpecName "kube-api-access-cmj59". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.083437 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/292dc6c8-2b07-4546-acc2-8cc465c17d4f-kube-api-access-vpv6x" (OuterVolumeSpecName: "kube-api-access-vpv6x") pod "292dc6c8-2b07-4546-acc2-8cc465c17d4f" (UID: "292dc6c8-2b07-4546-acc2-8cc465c17d4f"). InnerVolumeSpecName "kube-api-access-vpv6x". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.083674 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d074bd48-85b6-4bcf-ad23-bb541f92984d-kube-api-access-ctgx7" (OuterVolumeSpecName: "kube-api-access-ctgx7") pod "d074bd48-85b6-4bcf-ad23-bb541f92984d" (UID: "d074bd48-85b6-4bcf-ad23-bb541f92984d"). InnerVolumeSpecName "kube-api-access-ctgx7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.088030 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-c1fb-account-create-update-7gk8w" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.159630 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hkrgn\" (UniqueName: \"kubernetes.io/projected/bd714706-b63d-4d97-b9df-8ac662e9dfb0-kube-api-access-hkrgn\") pod \"bd714706-b63d-4d97-b9df-8ac662e9dfb0\" (UID: \"bd714706-b63d-4d97-b9df-8ac662e9dfb0\") " Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.159762 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b05cb31e-70e5-4e64-984a-6fa8053743de-operator-scripts\") pod \"b05cb31e-70e5-4e64-984a-6fa8053743de\" (UID: \"b05cb31e-70e5-4e64-984a-6fa8053743de\") " Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.159813 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2jthn\" (UniqueName: \"kubernetes.io/projected/b05cb31e-70e5-4e64-984a-6fa8053743de-kube-api-access-2jthn\") pod \"b05cb31e-70e5-4e64-984a-6fa8053743de\" (UID: \"b05cb31e-70e5-4e64-984a-6fa8053743de\") " Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.159841 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd714706-b63d-4d97-b9df-8ac662e9dfb0-operator-scripts\") pod \"bd714706-b63d-4d97-b9df-8ac662e9dfb0\" (UID: \"bd714706-b63d-4d97-b9df-8ac662e9dfb0\") " Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.160388 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ctgx7\" (UniqueName: \"kubernetes.io/projected/d074bd48-85b6-4bcf-ad23-bb541f92984d-kube-api-access-ctgx7\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.160440 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/d074bd48-85b6-4bcf-ad23-bb541f92984d-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.160452 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/292dc6c8-2b07-4546-acc2-8cc465c17d4f-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.160463 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8548f461-a34e-4c42-9d42-ee0a8b0bb7c7-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.160475 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cmj59\" (UniqueName: \"kubernetes.io/projected/8548f461-a34e-4c42-9d42-ee0a8b0bb7c7-kube-api-access-cmj59\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.160511 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vpv6x\" (UniqueName: \"kubernetes.io/projected/292dc6c8-2b07-4546-acc2-8cc465c17d4f-kube-api-access-vpv6x\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.161166 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd714706-b63d-4d97-b9df-8ac662e9dfb0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bd714706-b63d-4d97-b9df-8ac662e9dfb0" (UID: "bd714706-b63d-4d97-b9df-8ac662e9dfb0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.162188 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b05cb31e-70e5-4e64-984a-6fa8053743de-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b05cb31e-70e5-4e64-984a-6fa8053743de" (UID: "b05cb31e-70e5-4e64-984a-6fa8053743de"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.168849 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b05cb31e-70e5-4e64-984a-6fa8053743de-kube-api-access-2jthn" (OuterVolumeSpecName: "kube-api-access-2jthn") pod "b05cb31e-70e5-4e64-984a-6fa8053743de" (UID: "b05cb31e-70e5-4e64-984a-6fa8053743de"). InnerVolumeSpecName "kube-api-access-2jthn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.170839 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd714706-b63d-4d97-b9df-8ac662e9dfb0-kube-api-access-hkrgn" (OuterVolumeSpecName: "kube-api-access-hkrgn") pod "bd714706-b63d-4d97-b9df-8ac662e9dfb0" (UID: "bd714706-b63d-4d97-b9df-8ac662e9dfb0"). InnerVolumeSpecName "kube-api-access-hkrgn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.262542 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hkrgn\" (UniqueName: \"kubernetes.io/projected/bd714706-b63d-4d97-b9df-8ac662e9dfb0-kube-api-access-hkrgn\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.262596 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b05cb31e-70e5-4e64-984a-6fa8053743de-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.262610 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2jthn\" (UniqueName: \"kubernetes.io/projected/b05cb31e-70e5-4e64-984a-6fa8053743de-kube-api-access-2jthn\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.262622 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd714706-b63d-4d97-b9df-8ac662e9dfb0-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.308156 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c1fb-account-create-update-7gk8w" event={"ID":"bd714706-b63d-4d97-b9df-8ac662e9dfb0","Type":"ContainerDied","Data":"bebd445bdeb6d4f155aa67898eaa010c4dadd3cfc1e7f13f5722ed8996327f84"} Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.308184 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-c1fb-account-create-update-7gk8w" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.308203 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bebd445bdeb6d4f155aa67898eaa010c4dadd3cfc1e7f13f5722ed8996327f84" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.310068 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-d540-account-create-update-s467z" event={"ID":"292dc6c8-2b07-4546-acc2-8cc465c17d4f","Type":"ContainerDied","Data":"d689b3f30e7207c9229f102d8cf9fa593fd72804367a7a0d332718c2f9fe1586"} Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.310104 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d689b3f30e7207c9229f102d8cf9fa593fd72804367a7a0d332718c2f9fe1586" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.310104 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-d540-account-create-update-s467z" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.311371 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-e4ff-account-create-update-5zkr9" event={"ID":"e2c399bf-5a40-4e29-9056-60b030211a97","Type":"ContainerDied","Data":"21f7f0a24e795523c830c5697a4e886da369e394049de98345a2df0738b1e335"} Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.311391 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="21f7f0a24e795523c830c5697a4e886da369e394049de98345a2df0738b1e335" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.311499 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-e4ff-account-create-update-5zkr9" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.312665 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-h4kdv" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.312639 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-h4kdv" event={"ID":"8548f461-a34e-4c42-9d42-ee0a8b0bb7c7","Type":"ContainerDied","Data":"274f9b7571960886ac641796e5d44b18418e2739fadce29c56fb3c362cebb528"} Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.312779 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="274f9b7571960886ac641796e5d44b18418e2739fadce29c56fb3c362cebb528" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.313973 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-pnm6t" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.313981 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-pnm6t" event={"ID":"b05cb31e-70e5-4e64-984a-6fa8053743de","Type":"ContainerDied","Data":"c06bd8d9af928aecdf7b351028bce09385fcc89c2f11b36d9a13e4bfbd97ba06"} Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.314016 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c06bd8d9af928aecdf7b351028bce09385fcc89c2f11b36d9a13e4bfbd97ba06" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.315164 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-j2chq" event={"ID":"d074bd48-85b6-4bcf-ad23-bb541f92984d","Type":"ContainerDied","Data":"40f071ddc4d7a7cfd2f0dbc232db594c90c7ccf1e02fd2344e98426a7953566a"} Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.315189 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="40f071ddc4d7a7cfd2f0dbc232db594c90c7ccf1e02fd2344e98426a7953566a" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.315223 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-j2chq" Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.338216 4751 generic.go:334] "Generic (PLEG): container finished" podID="efb8a6cc-5def-4be4-82e1-b20f19d1c800" containerID="9deea4b5ebc9b2d7bf3b3d4ed606e017f437870861c43f0542112993c9aa7015" exitCode=143 Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.338743 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"efb8a6cc-5def-4be4-82e1-b20f19d1c800","Type":"ContainerDied","Data":"9deea4b5ebc9b2d7bf3b3d4ed606e017f437870861c43f0542112993c9aa7015"} Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.655503 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.655743 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="f864f57e-a41a-4e30-9293-8ede35ea08dd" containerName="glance-log" containerID="cri-o://fe0e9a4bd83db75eb5b6dc09f04fff2d883c2022dd54ab3c5ad2e44e37c4dab2" gracePeriod=30 Feb 27 16:48:22 crc kubenswrapper[4751]: I0227 16:48:22.655877 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="f864f57e-a41a-4e30-9293-8ede35ea08dd" containerName="glance-httpd" containerID="cri-o://e143dc066b449f2f33b103233149d43b2ba11f124d24c1804c8d33d492af04af" gracePeriod=30 Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.349054 4751 generic.go:334] "Generic (PLEG): container finished" podID="e6fe3337-3152-403c-946a-49fb365c99c5" containerID="38f9ad27baed35d56d749ed6e9803d5bf8fa23f102016cb9ae6cda807bd2efbe" exitCode=0 Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.349136 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6fe3337-3152-403c-946a-49fb365c99c5","Type":"ContainerDied","Data":"38f9ad27baed35d56d749ed6e9803d5bf8fa23f102016cb9ae6cda807bd2efbe"} Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.351687 4751 generic.go:334] "Generic (PLEG): container finished" podID="f864f57e-a41a-4e30-9293-8ede35ea08dd" containerID="fe0e9a4bd83db75eb5b6dc09f04fff2d883c2022dd54ab3c5ad2e44e37c4dab2" exitCode=143 Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.351741 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f864f57e-a41a-4e30-9293-8ede35ea08dd","Type":"ContainerDied","Data":"fe0e9a4bd83db75eb5b6dc09f04fff2d883c2022dd54ab3c5ad2e44e37c4dab2"} Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.460237 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.490743 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6fe3337-3152-403c-946a-49fb365c99c5-run-httpd\") pod \"e6fe3337-3152-403c-946a-49fb365c99c5\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.490798 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6fe3337-3152-403c-946a-49fb365c99c5-log-httpd\") pod \"e6fe3337-3152-403c-946a-49fb365c99c5\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.490835 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-combined-ca-bundle\") pod \"e6fe3337-3152-403c-946a-49fb365c99c5\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.490955 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-sg-core-conf-yaml\") pod \"e6fe3337-3152-403c-946a-49fb365c99c5\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.490990 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-scripts\") pod \"e6fe3337-3152-403c-946a-49fb365c99c5\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.491012 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-882pc\" (UniqueName: \"kubernetes.io/projected/e6fe3337-3152-403c-946a-49fb365c99c5-kube-api-access-882pc\") pod \"e6fe3337-3152-403c-946a-49fb365c99c5\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.491054 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-config-data\") pod \"e6fe3337-3152-403c-946a-49fb365c99c5\" (UID: \"e6fe3337-3152-403c-946a-49fb365c99c5\") " Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.494391 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6fe3337-3152-403c-946a-49fb365c99c5-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e6fe3337-3152-403c-946a-49fb365c99c5" (UID: "e6fe3337-3152-403c-946a-49fb365c99c5"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.494749 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6fe3337-3152-403c-946a-49fb365c99c5-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e6fe3337-3152-403c-946a-49fb365c99c5" (UID: "e6fe3337-3152-403c-946a-49fb365c99c5"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.504535 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-scripts" (OuterVolumeSpecName: "scripts") pod "e6fe3337-3152-403c-946a-49fb365c99c5" (UID: "e6fe3337-3152-403c-946a-49fb365c99c5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.513128 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6fe3337-3152-403c-946a-49fb365c99c5-kube-api-access-882pc" (OuterVolumeSpecName: "kube-api-access-882pc") pod "e6fe3337-3152-403c-946a-49fb365c99c5" (UID: "e6fe3337-3152-403c-946a-49fb365c99c5"). InnerVolumeSpecName "kube-api-access-882pc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.519828 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e6fe3337-3152-403c-946a-49fb365c99c5" (UID: "e6fe3337-3152-403c-946a-49fb365c99c5"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.585175 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-config-data" (OuterVolumeSpecName: "config-data") pod "e6fe3337-3152-403c-946a-49fb365c99c5" (UID: "e6fe3337-3152-403c-946a-49fb365c99c5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.587310 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e6fe3337-3152-403c-946a-49fb365c99c5" (UID: "e6fe3337-3152-403c-946a-49fb365c99c5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.593749 4751 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6fe3337-3152-403c-946a-49fb365c99c5-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.593775 4751 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6fe3337-3152-403c-946a-49fb365c99c5-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.593784 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.593795 4751 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.593803 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.593811 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-882pc\" (UniqueName: \"kubernetes.io/projected/e6fe3337-3152-403c-946a-49fb365c99c5-kube-api-access-882pc\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.593819 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6fe3337-3152-403c-946a-49fb365c99c5-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.869476 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-mcjmb"] Feb 27 16:48:23 crc kubenswrapper[4751]: E0227 16:48:23.870032 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6fe3337-3152-403c-946a-49fb365c99c5" containerName="ceilometer-notification-agent" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.870048 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6fe3337-3152-403c-946a-49fb365c99c5" containerName="ceilometer-notification-agent" Feb 27 16:48:23 crc kubenswrapper[4751]: E0227 16:48:23.870061 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6fe3337-3152-403c-946a-49fb365c99c5" containerName="ceilometer-central-agent" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.870069 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6fe3337-3152-403c-946a-49fb365c99c5" containerName="ceilometer-central-agent" Feb 27 16:48:23 crc kubenswrapper[4751]: E0227 16:48:23.870094 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6fe3337-3152-403c-946a-49fb365c99c5" containerName="sg-core" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.870100 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6fe3337-3152-403c-946a-49fb365c99c5" containerName="sg-core" Feb 27 16:48:23 crc kubenswrapper[4751]: E0227 16:48:23.870108 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b05cb31e-70e5-4e64-984a-6fa8053743de" containerName="mariadb-database-create" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 
16:48:23.870115 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b05cb31e-70e5-4e64-984a-6fa8053743de" containerName="mariadb-database-create" Feb 27 16:48:23 crc kubenswrapper[4751]: E0227 16:48:23.870126 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd714706-b63d-4d97-b9df-8ac662e9dfb0" containerName="mariadb-account-create-update" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.870131 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd714706-b63d-4d97-b9df-8ac662e9dfb0" containerName="mariadb-account-create-update" Feb 27 16:48:23 crc kubenswrapper[4751]: E0227 16:48:23.870140 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="292dc6c8-2b07-4546-acc2-8cc465c17d4f" containerName="mariadb-account-create-update" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.870147 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="292dc6c8-2b07-4546-acc2-8cc465c17d4f" containerName="mariadb-account-create-update" Feb 27 16:48:23 crc kubenswrapper[4751]: E0227 16:48:23.870156 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d074bd48-85b6-4bcf-ad23-bb541f92984d" containerName="mariadb-database-create" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.870162 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="d074bd48-85b6-4bcf-ad23-bb541f92984d" containerName="mariadb-database-create" Feb 27 16:48:23 crc kubenswrapper[4751]: E0227 16:48:23.870176 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2c399bf-5a40-4e29-9056-60b030211a97" containerName="mariadb-account-create-update" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.870182 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2c399bf-5a40-4e29-9056-60b030211a97" containerName="mariadb-account-create-update" Feb 27 16:48:23 crc kubenswrapper[4751]: E0227 16:48:23.870192 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6fe3337-3152-403c-946a-49fb365c99c5" containerName="proxy-httpd" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.870197 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6fe3337-3152-403c-946a-49fb365c99c5" containerName="proxy-httpd" Feb 27 16:48:23 crc kubenswrapper[4751]: E0227 16:48:23.870205 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8548f461-a34e-4c42-9d42-ee0a8b0bb7c7" containerName="mariadb-database-create" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.870211 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="8548f461-a34e-4c42-9d42-ee0a8b0bb7c7" containerName="mariadb-database-create" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.870368 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="292dc6c8-2b07-4546-acc2-8cc465c17d4f" containerName="mariadb-account-create-update" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.870379 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="b05cb31e-70e5-4e64-984a-6fa8053743de" containerName="mariadb-database-create" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.870390 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd714706-b63d-4d97-b9df-8ac662e9dfb0" containerName="mariadb-account-create-update" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.870403 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6fe3337-3152-403c-946a-49fb365c99c5" containerName="proxy-httpd" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.870472 4751 
memory_manager.go:354] "RemoveStaleState removing state" podUID="e6fe3337-3152-403c-946a-49fb365c99c5" containerName="ceilometer-central-agent" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.870480 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="8548f461-a34e-4c42-9d42-ee0a8b0bb7c7" containerName="mariadb-database-create" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.870490 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6fe3337-3152-403c-946a-49fb365c99c5" containerName="ceilometer-notification-agent" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.870499 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6fe3337-3152-403c-946a-49fb365c99c5" containerName="sg-core" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.870507 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2c399bf-5a40-4e29-9056-60b030211a97" containerName="mariadb-account-create-update" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.870523 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="d074bd48-85b6-4bcf-ad23-bb541f92984d" containerName="mariadb-database-create" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.871065 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-mcjmb" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.872879 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.873106 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-kd2ms" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.873393 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.885039 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-mcjmb"] Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.900920 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcn7p\" (UniqueName: \"kubernetes.io/projected/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-kube-api-access-jcn7p\") pod \"nova-cell0-conductor-db-sync-mcjmb\" (UID: \"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241\") " pod="openstack/nova-cell0-conductor-db-sync-mcjmb" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.901184 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-scripts\") pod \"nova-cell0-conductor-db-sync-mcjmb\" (UID: \"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241\") " pod="openstack/nova-cell0-conductor-db-sync-mcjmb" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.901342 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-config-data\") pod \"nova-cell0-conductor-db-sync-mcjmb\" (UID: \"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241\") " pod="openstack/nova-cell0-conductor-db-sync-mcjmb" Feb 27 16:48:23 crc kubenswrapper[4751]: I0227 16:48:23.901456 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-mcjmb\" (UID: \"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241\") " pod="openstack/nova-cell0-conductor-db-sync-mcjmb" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.002912 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-config-data\") pod \"nova-cell0-conductor-db-sync-mcjmb\" (UID: \"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241\") " pod="openstack/nova-cell0-conductor-db-sync-mcjmb" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.002989 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-mcjmb\" (UID: \"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241\") " pod="openstack/nova-cell0-conductor-db-sync-mcjmb" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.003051 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcn7p\" (UniqueName: \"kubernetes.io/projected/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-kube-api-access-jcn7p\") pod \"nova-cell0-conductor-db-sync-mcjmb\" (UID: \"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241\") " pod="openstack/nova-cell0-conductor-db-sync-mcjmb" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.003113 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-scripts\") pod \"nova-cell0-conductor-db-sync-mcjmb\" (UID: \"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241\") " pod="openstack/nova-cell0-conductor-db-sync-mcjmb" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.006775 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-scripts\") pod \"nova-cell0-conductor-db-sync-mcjmb\" (UID: \"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241\") " pod="openstack/nova-cell0-conductor-db-sync-mcjmb" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.007011 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-mcjmb\" (UID: \"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241\") " pod="openstack/nova-cell0-conductor-db-sync-mcjmb" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.007623 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-config-data\") pod \"nova-cell0-conductor-db-sync-mcjmb\" (UID: \"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241\") " pod="openstack/nova-cell0-conductor-db-sync-mcjmb" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.023551 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcn7p\" (UniqueName: \"kubernetes.io/projected/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-kube-api-access-jcn7p\") pod \"nova-cell0-conductor-db-sync-mcjmb\" (UID: \"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241\") " pod="openstack/nova-cell0-conductor-db-sync-mcjmb" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.189593 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-mcjmb" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.382278 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6fe3337-3152-403c-946a-49fb365c99c5","Type":"ContainerDied","Data":"cfa8040f356dc7d0b68feb3d9fd946652ead9df27a3c7bb6755186b9fcd03032"} Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.382568 4751 scope.go:117] "RemoveContainer" containerID="e168ef42de8017878320ddf0c8de784688d870949ae6fa383c773c43becca118" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.382516 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.421519 4751 scope.go:117] "RemoveContainer" containerID="ece3b53156d4bd583adf821f4cfa7a369365f855ee5afcc774722af014f079ba" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.429472 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.447068 4751 scope.go:117] "RemoveContainer" containerID="39809915829e48c9315635956936ee8185eb8d13ee5704a93b241618189d1658" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.463156 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.472189 4751 scope.go:117] "RemoveContainer" containerID="38f9ad27baed35d56d749ed6e9803d5bf8fa23f102016cb9ae6cda807bd2efbe" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.472204 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.474616 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.480408 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.480534 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.482654 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.510605 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lg6gl\" (UniqueName: \"kubernetes.io/projected/596283dd-65b4-4e12-aada-02133b846729-kube-api-access-lg6gl\") pod \"ceilometer-0\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " pod="openstack/ceilometer-0" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.510877 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " pod="openstack/ceilometer-0" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.510996 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/596283dd-65b4-4e12-aada-02133b846729-run-httpd\") pod \"ceilometer-0\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " pod="openstack/ceilometer-0" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.511181 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-scripts\") pod \"ceilometer-0\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " pod="openstack/ceilometer-0" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.511290 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/596283dd-65b4-4e12-aada-02133b846729-log-httpd\") pod \"ceilometer-0\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " pod="openstack/ceilometer-0" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.511475 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " pod="openstack/ceilometer-0" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.511647 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-config-data\") pod \"ceilometer-0\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " pod="openstack/ceilometer-0" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.535729 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6fe3337-3152-403c-946a-49fb365c99c5" path="/var/lib/kubelet/pods/e6fe3337-3152-403c-946a-49fb365c99c5/volumes" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.612821 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-scripts\") pod \"ceilometer-0\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " pod="openstack/ceilometer-0" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.612864 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/596283dd-65b4-4e12-aada-02133b846729-log-httpd\") pod \"ceilometer-0\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " pod="openstack/ceilometer-0" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.612895 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " pod="openstack/ceilometer-0" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.612956 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-config-data\") pod \"ceilometer-0\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " pod="openstack/ceilometer-0" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.612990 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lg6gl\" (UniqueName: \"kubernetes.io/projected/596283dd-65b4-4e12-aada-02133b846729-kube-api-access-lg6gl\") pod \"ceilometer-0\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " pod="openstack/ceilometer-0" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.613009 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " pod="openstack/ceilometer-0" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.613033 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/596283dd-65b4-4e12-aada-02133b846729-run-httpd\") pod \"ceilometer-0\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " pod="openstack/ceilometer-0" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.613519 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/596283dd-65b4-4e12-aada-02133b846729-run-httpd\") pod \"ceilometer-0\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " pod="openstack/ceilometer-0" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.615433 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/596283dd-65b4-4e12-aada-02133b846729-log-httpd\") pod \"ceilometer-0\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " pod="openstack/ceilometer-0" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.619153 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-config-data\") pod \"ceilometer-0\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " pod="openstack/ceilometer-0" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.620248 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " pod="openstack/ceilometer-0" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.620457 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-scripts\") pod \"ceilometer-0\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " pod="openstack/ceilometer-0" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.627097 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " pod="openstack/ceilometer-0" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.633458 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lg6gl\" (UniqueName: \"kubernetes.io/projected/596283dd-65b4-4e12-aada-02133b846729-kube-api-access-lg6gl\") pod \"ceilometer-0\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " pod="openstack/ceilometer-0" Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.663258 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-mcjmb"] Feb 27 16:48:24 crc kubenswrapper[4751]: I0227 16:48:24.794932 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.032945 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.126786 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-public-tls-certs\") pod \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.126851 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-combined-ca-bundle\") pod \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.126871 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.126913 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-scripts\") pod \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.126953 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efb8a6cc-5def-4be4-82e1-b20f19d1c800-logs\") pod \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.127554 4751 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/efb8a6cc-5def-4be4-82e1-b20f19d1c800-logs" (OuterVolumeSpecName: "logs") pod "efb8a6cc-5def-4be4-82e1-b20f19d1c800" (UID: "efb8a6cc-5def-4be4-82e1-b20f19d1c800"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.145646 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-scripts" (OuterVolumeSpecName: "scripts") pod "efb8a6cc-5def-4be4-82e1-b20f19d1c800" (UID: "efb8a6cc-5def-4be4-82e1-b20f19d1c800"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.145705 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "efb8a6cc-5def-4be4-82e1-b20f19d1c800" (UID: "efb8a6cc-5def-4be4-82e1-b20f19d1c800"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.204269 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "efb8a6cc-5def-4be4-82e1-b20f19d1c800" (UID: "efb8a6cc-5def-4be4-82e1-b20f19d1c800"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.225277 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "efb8a6cc-5def-4be4-82e1-b20f19d1c800" (UID: "efb8a6cc-5def-4be4-82e1-b20f19d1c800"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.231344 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/efb8a6cc-5def-4be4-82e1-b20f19d1c800-httpd-run\") pod \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.231375 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-config-data\") pod \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.231459 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xn768\" (UniqueName: \"kubernetes.io/projected/efb8a6cc-5def-4be4-82e1-b20f19d1c800-kube-api-access-xn768\") pod \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\" (UID: \"efb8a6cc-5def-4be4-82e1-b20f19d1c800\") " Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.231783 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.231793 4751 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efb8a6cc-5def-4be4-82e1-b20f19d1c800-logs\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.231825 4751 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.231837 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.231855 4751 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.235770 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/efb8a6cc-5def-4be4-82e1-b20f19d1c800-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "efb8a6cc-5def-4be4-82e1-b20f19d1c800" (UID: "efb8a6cc-5def-4be4-82e1-b20f19d1c800"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.241443 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efb8a6cc-5def-4be4-82e1-b20f19d1c800-kube-api-access-xn768" (OuterVolumeSpecName: "kube-api-access-xn768") pod "efb8a6cc-5def-4be4-82e1-b20f19d1c800" (UID: "efb8a6cc-5def-4be4-82e1-b20f19d1c800"). InnerVolumeSpecName "kube-api-access-xn768". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.274384 4751 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.313040 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-config-data" (OuterVolumeSpecName: "config-data") pod "efb8a6cc-5def-4be4-82e1-b20f19d1c800" (UID: "efb8a6cc-5def-4be4-82e1-b20f19d1c800"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.334157 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xn768\" (UniqueName: \"kubernetes.io/projected/efb8a6cc-5def-4be4-82e1-b20f19d1c800-kube-api-access-xn768\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.334208 4751 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.334219 4751 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/efb8a6cc-5def-4be4-82e1-b20f19d1c800-httpd-run\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.334229 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/efb8a6cc-5def-4be4-82e1-b20f19d1c800-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.393278 4751 generic.go:334] "Generic (PLEG): container finished" podID="efb8a6cc-5def-4be4-82e1-b20f19d1c800" containerID="714aa704291a6040e48f2ee626814b51c2a58b2bf0d958cef605f1059bbfaf53" exitCode=0 Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.393331 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"efb8a6cc-5def-4be4-82e1-b20f19d1c800","Type":"ContainerDied","Data":"714aa704291a6040e48f2ee626814b51c2a58b2bf0d958cef605f1059bbfaf53"} Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.393356 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"efb8a6cc-5def-4be4-82e1-b20f19d1c800","Type":"ContainerDied","Data":"b08a234d7afcb1186bb65406412c781713c7c3f63a3ccdc53d4d086783801785"} Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.393371 4751 scope.go:117] "RemoveContainer" containerID="714aa704291a6040e48f2ee626814b51c2a58b2bf0d958cef605f1059bbfaf53" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.393495 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.400559 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-mcjmb" event={"ID":"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241","Type":"ContainerStarted","Data":"128c04a25993ea107d641ea522d3b1d4424890eca15bbd46a467cfcf68966f29"} Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.420546 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.447609 4751 scope.go:117] "RemoveContainer" containerID="9deea4b5ebc9b2d7bf3b3d4ed606e017f437870861c43f0542112993c9aa7015" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.449942 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.461485 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.470974 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Feb 27 16:48:25 crc kubenswrapper[4751]: E0227 16:48:25.471427 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efb8a6cc-5def-4be4-82e1-b20f19d1c800" containerName="glance-httpd" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.471444 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="efb8a6cc-5def-4be4-82e1-b20f19d1c800" containerName="glance-httpd" Feb 27 16:48:25 crc kubenswrapper[4751]: E0227 16:48:25.471468 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efb8a6cc-5def-4be4-82e1-b20f19d1c800" containerName="glance-log" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.471476 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="efb8a6cc-5def-4be4-82e1-b20f19d1c800" containerName="glance-log" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.471683 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="efb8a6cc-5def-4be4-82e1-b20f19d1c800" containerName="glance-httpd" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.471706 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="efb8a6cc-5def-4be4-82e1-b20f19d1c800" containerName="glance-log" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.472697 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.472868 4751 scope.go:117] "RemoveContainer" containerID="714aa704291a6040e48f2ee626814b51c2a58b2bf0d958cef605f1059bbfaf53" Feb 27 16:48:25 crc kubenswrapper[4751]: E0227 16:48:25.473484 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"714aa704291a6040e48f2ee626814b51c2a58b2bf0d958cef605f1059bbfaf53\": container with ID starting with 714aa704291a6040e48f2ee626814b51c2a58b2bf0d958cef605f1059bbfaf53 not found: ID does not exist" containerID="714aa704291a6040e48f2ee626814b51c2a58b2bf0d958cef605f1059bbfaf53" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.473513 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"714aa704291a6040e48f2ee626814b51c2a58b2bf0d958cef605f1059bbfaf53"} err="failed to get container status \"714aa704291a6040e48f2ee626814b51c2a58b2bf0d958cef605f1059bbfaf53\": rpc error: code = NotFound desc = could not find container \"714aa704291a6040e48f2ee626814b51c2a58b2bf0d958cef605f1059bbfaf53\": container with ID starting with 714aa704291a6040e48f2ee626814b51c2a58b2bf0d958cef605f1059bbfaf53 not found: ID does not exist" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.473534 4751 scope.go:117] "RemoveContainer" containerID="9deea4b5ebc9b2d7bf3b3d4ed606e017f437870861c43f0542112993c9aa7015" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.474703 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.474869 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Feb 27 16:48:25 crc kubenswrapper[4751]: E0227 16:48:25.476077 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9deea4b5ebc9b2d7bf3b3d4ed606e017f437870861c43f0542112993c9aa7015\": container with ID starting with 9deea4b5ebc9b2d7bf3b3d4ed606e017f437870861c43f0542112993c9aa7015 not found: ID does not exist" containerID="9deea4b5ebc9b2d7bf3b3d4ed606e017f437870861c43f0542112993c9aa7015" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.476102 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9deea4b5ebc9b2d7bf3b3d4ed606e017f437870861c43f0542112993c9aa7015"} err="failed to get container status \"9deea4b5ebc9b2d7bf3b3d4ed606e017f437870861c43f0542112993c9aa7015\": rpc error: code = NotFound desc = could not find container \"9deea4b5ebc9b2d7bf3b3d4ed606e017f437870861c43f0542112993c9aa7015\": container with ID starting with 9deea4b5ebc9b2d7bf3b3d4ed606e017f437870861c43f0542112993c9aa7015 not found: ID does not exist" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.484357 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.640356 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-config-data\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.640391 4751 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.640482 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ef932397-22e9-4d46-90e3-57076299d4cf-logs\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.640500 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-scripts\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.640517 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.640557 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.640585 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ef932397-22e9-4d46-90e3-57076299d4cf-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.640664 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hhcz\" (UniqueName: \"kubernetes.io/projected/ef932397-22e9-4d46-90e3-57076299d4cf-kube-api-access-8hhcz\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.741974 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.742237 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ef932397-22e9-4d46-90e3-57076299d4cf-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 
16:48:25.742273 4751 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.742430 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hhcz\" (UniqueName: \"kubernetes.io/projected/ef932397-22e9-4d46-90e3-57076299d4cf-kube-api-access-8hhcz\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.742485 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-config-data\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.742501 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.742659 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ef932397-22e9-4d46-90e3-57076299d4cf-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.743365 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ef932397-22e9-4d46-90e3-57076299d4cf-logs\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.743423 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-scripts\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.743447 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.743760 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ef932397-22e9-4d46-90e3-57076299d4cf-logs\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.746951 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"scripts\" (UniqueName: \"kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-scripts\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.747103 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.747634 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.749445 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-config-data\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.763201 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hhcz\" (UniqueName: \"kubernetes.io/projected/ef932397-22e9-4d46-90e3-57076299d4cf-kube-api-access-8hhcz\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.781370 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " pod="openstack/glance-default-external-api-0" Feb 27 16:48:25 crc kubenswrapper[4751]: I0227 16:48:25.795395 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.323490 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.399386 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 27 16:48:26 crc kubenswrapper[4751]: W0227 16:48:26.409689 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podef932397_22e9_4d46_90e3_57076299d4cf.slice/crio-2cd146dc73af18c958533dc6b08a6c4910148ec1236dcae4148ea3c80ca24ea9 WatchSource:0}: Error finding container 2cd146dc73af18c958533dc6b08a6c4910148ec1236dcae4148ea3c80ca24ea9: Status 404 returned error can't find the container with id 2cd146dc73af18c958533dc6b08a6c4910148ec1236dcae4148ea3c80ca24ea9 Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.422024 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"596283dd-65b4-4e12-aada-02133b846729","Type":"ContainerStarted","Data":"dd6fc2dfc7352ff22fee3d0450c46d9ee15988df0afc980bf3431b375003b46c"} Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.422067 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"596283dd-65b4-4e12-aada-02133b846729","Type":"ContainerStarted","Data":"3b0f96dda3c406e0b40fafb567684ced5edf6a139cdd53c7e8e12a629cf6d46c"} Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.427272 4751 generic.go:334] "Generic (PLEG): container finished" podID="f864f57e-a41a-4e30-9293-8ede35ea08dd" containerID="e143dc066b449f2f33b103233149d43b2ba11f124d24c1804c8d33d492af04af" exitCode=0 Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.427538 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.427554 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f864f57e-a41a-4e30-9293-8ede35ea08dd","Type":"ContainerDied","Data":"e143dc066b449f2f33b103233149d43b2ba11f124d24c1804c8d33d492af04af"} Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.427770 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f864f57e-a41a-4e30-9293-8ede35ea08dd","Type":"ContainerDied","Data":"67ea88b9b2b75da50386f5bdef1832861a8980c0c0642bb0f9fba4c4c32e7303"} Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.427790 4751 scope.go:117] "RemoveContainer" containerID="e143dc066b449f2f33b103233149d43b2ba11f124d24c1804c8d33d492af04af" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.457629 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.461399 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-config-data\") pod \"f864f57e-a41a-4e30-9293-8ede35ea08dd\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.461465 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f864f57e-a41a-4e30-9293-8ede35ea08dd-httpd-run\") pod \"f864f57e-a41a-4e30-9293-8ede35ea08dd\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.461522 4751 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-combined-ca-bundle\") pod \"f864f57e-a41a-4e30-9293-8ede35ea08dd\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.461555 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-internal-tls-certs\") pod \"f864f57e-a41a-4e30-9293-8ede35ea08dd\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.461611 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-scripts\") pod \"f864f57e-a41a-4e30-9293-8ede35ea08dd\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.461641 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-whjlg\" (UniqueName: \"kubernetes.io/projected/f864f57e-a41a-4e30-9293-8ede35ea08dd-kube-api-access-whjlg\") pod \"f864f57e-a41a-4e30-9293-8ede35ea08dd\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.461717 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f864f57e-a41a-4e30-9293-8ede35ea08dd-logs\") pod \"f864f57e-a41a-4e30-9293-8ede35ea08dd\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.461781 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"f864f57e-a41a-4e30-9293-8ede35ea08dd\" (UID: \"f864f57e-a41a-4e30-9293-8ede35ea08dd\") " Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.462973 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f864f57e-a41a-4e30-9293-8ede35ea08dd-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "f864f57e-a41a-4e30-9293-8ede35ea08dd" (UID: "f864f57e-a41a-4e30-9293-8ede35ea08dd"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.467239 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f864f57e-a41a-4e30-9293-8ede35ea08dd-logs" (OuterVolumeSpecName: "logs") pod "f864f57e-a41a-4e30-9293-8ede35ea08dd" (UID: "f864f57e-a41a-4e30-9293-8ede35ea08dd"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.470052 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "f864f57e-a41a-4e30-9293-8ede35ea08dd" (UID: "f864f57e-a41a-4e30-9293-8ede35ea08dd"). InnerVolumeSpecName "local-storage12-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.470484 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f864f57e-a41a-4e30-9293-8ede35ea08dd-kube-api-access-whjlg" (OuterVolumeSpecName: "kube-api-access-whjlg") pod "f864f57e-a41a-4e30-9293-8ede35ea08dd" (UID: "f864f57e-a41a-4e30-9293-8ede35ea08dd"). InnerVolumeSpecName "kube-api-access-whjlg". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.480528 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-scripts" (OuterVolumeSpecName: "scripts") pod "f864f57e-a41a-4e30-9293-8ede35ea08dd" (UID: "f864f57e-a41a-4e30-9293-8ede35ea08dd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.486210 4751 scope.go:117] "RemoveContainer" containerID="fe0e9a4bd83db75eb5b6dc09f04fff2d883c2022dd54ab3c5ad2e44e37c4dab2" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.493214 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f864f57e-a41a-4e30-9293-8ede35ea08dd" (UID: "f864f57e-a41a-4e30-9293-8ede35ea08dd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.527177 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-config-data" (OuterVolumeSpecName: "config-data") pod "f864f57e-a41a-4e30-9293-8ede35ea08dd" (UID: "f864f57e-a41a-4e30-9293-8ede35ea08dd"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.535302 4751 scope.go:117] "RemoveContainer" containerID="e143dc066b449f2f33b103233149d43b2ba11f124d24c1804c8d33d492af04af" Feb 27 16:48:26 crc kubenswrapper[4751]: E0227 16:48:26.543086 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e143dc066b449f2f33b103233149d43b2ba11f124d24c1804c8d33d492af04af\": container with ID starting with e143dc066b449f2f33b103233149d43b2ba11f124d24c1804c8d33d492af04af not found: ID does not exist" containerID="e143dc066b449f2f33b103233149d43b2ba11f124d24c1804c8d33d492af04af" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.543131 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e143dc066b449f2f33b103233149d43b2ba11f124d24c1804c8d33d492af04af"} err="failed to get container status \"e143dc066b449f2f33b103233149d43b2ba11f124d24c1804c8d33d492af04af\": rpc error: code = NotFound desc = could not find container \"e143dc066b449f2f33b103233149d43b2ba11f124d24c1804c8d33d492af04af\": container with ID starting with e143dc066b449f2f33b103233149d43b2ba11f124d24c1804c8d33d492af04af not found: ID does not exist" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.543154 4751 scope.go:117] "RemoveContainer" containerID="fe0e9a4bd83db75eb5b6dc09f04fff2d883c2022dd54ab3c5ad2e44e37c4dab2" Feb 27 16:48:26 crc kubenswrapper[4751]: E0227 16:48:26.544246 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe0e9a4bd83db75eb5b6dc09f04fff2d883c2022dd54ab3c5ad2e44e37c4dab2\": container with ID starting with fe0e9a4bd83db75eb5b6dc09f04fff2d883c2022dd54ab3c5ad2e44e37c4dab2 not found: ID does not exist" containerID="fe0e9a4bd83db75eb5b6dc09f04fff2d883c2022dd54ab3c5ad2e44e37c4dab2" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.544269 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe0e9a4bd83db75eb5b6dc09f04fff2d883c2022dd54ab3c5ad2e44e37c4dab2"} err="failed to get container status \"fe0e9a4bd83db75eb5b6dc09f04fff2d883c2022dd54ab3c5ad2e44e37c4dab2\": rpc error: code = NotFound desc = could not find container \"fe0e9a4bd83db75eb5b6dc09f04fff2d883c2022dd54ab3c5ad2e44e37c4dab2\": container with ID starting with fe0e9a4bd83db75eb5b6dc09f04fff2d883c2022dd54ab3c5ad2e44e37c4dab2 not found: ID does not exist" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.550890 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efb8a6cc-5def-4be4-82e1-b20f19d1c800" path="/var/lib/kubelet/pods/efb8a6cc-5def-4be4-82e1-b20f19d1c800/volumes" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.555989 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "f864f57e-a41a-4e30-9293-8ede35ea08dd" (UID: "f864f57e-a41a-4e30-9293-8ede35ea08dd"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.564599 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.564628 4751 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f864f57e-a41a-4e30-9293-8ede35ea08dd-httpd-run\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.564636 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.564645 4751 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.564675 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f864f57e-a41a-4e30-9293-8ede35ea08dd-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.564686 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-whjlg\" (UniqueName: \"kubernetes.io/projected/f864f57e-a41a-4e30-9293-8ede35ea08dd-kube-api-access-whjlg\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.564695 4751 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f864f57e-a41a-4e30-9293-8ede35ea08dd-logs\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.564720 4751 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.584585 4751 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.667642 4751 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.867717 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.893816 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.906040 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 27 16:48:26 crc kubenswrapper[4751]: E0227 16:48:26.906483 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f864f57e-a41a-4e30-9293-8ede35ea08dd" containerName="glance-httpd" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.906504 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="f864f57e-a41a-4e30-9293-8ede35ea08dd" containerName="glance-httpd" Feb 27 16:48:26 crc kubenswrapper[4751]: E0227 
16:48:26.906517 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f864f57e-a41a-4e30-9293-8ede35ea08dd" containerName="glance-log" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.906523 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="f864f57e-a41a-4e30-9293-8ede35ea08dd" containerName="glance-log" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.906693 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="f864f57e-a41a-4e30-9293-8ede35ea08dd" containerName="glance-log" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.906716 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="f864f57e-a41a-4e30-9293-8ede35ea08dd" containerName="glance-httpd" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.907714 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.909815 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.909962 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Feb 27 16:48:26 crc kubenswrapper[4751]: I0227 16:48:26.914479 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.077143 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.077187 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.077252 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ad24b50-556b-4799-a598-b7618c1664fd-logs\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.077271 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2ad24b50-556b-4799-a598-b7618c1664fd-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.077288 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.077313 4751 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.077345 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttdxf\" (UniqueName: \"kubernetes.io/projected/2ad24b50-556b-4799-a598-b7618c1664fd-kube-api-access-ttdxf\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.077388 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.180278 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.180317 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.180377 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ad24b50-556b-4799-a598-b7618c1664fd-logs\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.180392 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2ad24b50-556b-4799-a598-b7618c1664fd-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.180717 4751 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.180926 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ad24b50-556b-4799-a598-b7618c1664fd-logs\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.181001 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.181053 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.181136 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttdxf\" (UniqueName: \"kubernetes.io/projected/2ad24b50-556b-4799-a598-b7618c1664fd-kube-api-access-ttdxf\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.181147 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2ad24b50-556b-4799-a598-b7618c1664fd-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.181156 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.186384 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.187265 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.188124 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.188973 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.202185 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttdxf\" (UniqueName: 
\"kubernetes.io/projected/2ad24b50-556b-4799-a598-b7618c1664fd-kube-api-access-ttdxf\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.222046 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.249798 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.458646 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ef932397-22e9-4d46-90e3-57076299d4cf","Type":"ContainerStarted","Data":"d6c9a2d485d12adf1118656837947a9c97a35defd0392ad64dca5800e77a1603"} Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.458975 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ef932397-22e9-4d46-90e3-57076299d4cf","Type":"ContainerStarted","Data":"2cd146dc73af18c958533dc6b08a6c4910148ec1236dcae4148ea3c80ca24ea9"} Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.466788 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"596283dd-65b4-4e12-aada-02133b846729","Type":"ContainerStarted","Data":"3e58635380583d5984af4668f25c4d5a686648ddaf54a32a7d5d5acbb3d23426"} Feb 27 16:48:27 crc kubenswrapper[4751]: I0227 16:48:27.838489 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 27 16:48:27 crc kubenswrapper[4751]: W0227 16:48:27.860825 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ad24b50_556b_4799_a598_b7618c1664fd.slice/crio-0295219047b4cea9155062b971b187d656f90229fd18c0bfc6f8a4a588b308d1 WatchSource:0}: Error finding container 0295219047b4cea9155062b971b187d656f90229fd18c0bfc6f8a4a588b308d1: Status 404 returned error can't find the container with id 0295219047b4cea9155062b971b187d656f90229fd18c0bfc6f8a4a588b308d1 Feb 27 16:48:28 crc kubenswrapper[4751]: I0227 16:48:28.495975 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ef932397-22e9-4d46-90e3-57076299d4cf","Type":"ContainerStarted","Data":"2845974abbc25e68928a72daeb08093bf2536ab0bc6998e59ff8fa1ec52eba91"} Feb 27 16:48:28 crc kubenswrapper[4751]: I0227 16:48:28.509999 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"596283dd-65b4-4e12-aada-02133b846729","Type":"ContainerStarted","Data":"5e9d13da4d7498f1d3beb27d630971361f8c5b0e5b9d41e463227a457f0630a4"} Feb 27 16:48:28 crc kubenswrapper[4751]: I0227 16:48:28.549029 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f864f57e-a41a-4e30-9293-8ede35ea08dd" path="/var/lib/kubelet/pods/f864f57e-a41a-4e30-9293-8ede35ea08dd/volumes" Feb 27 16:48:28 crc kubenswrapper[4751]: I0227 16:48:28.550025 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" 
event={"ID":"2ad24b50-556b-4799-a598-b7618c1664fd","Type":"ContainerStarted","Data":"e818dd36bb380e38ca7e8c06d9f356dc7568cbb2837fa401aecb68a64720dc8e"} Feb 27 16:48:28 crc kubenswrapper[4751]: I0227 16:48:28.550057 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2ad24b50-556b-4799-a598-b7618c1664fd","Type":"ContainerStarted","Data":"0295219047b4cea9155062b971b187d656f90229fd18c0bfc6f8a4a588b308d1"} Feb 27 16:48:28 crc kubenswrapper[4751]: I0227 16:48:28.557950 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.5578971470000003 podStartE2EDuration="3.557897147s" podCreationTimestamp="2026-02-27 16:48:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:48:28.547863869 +0000 UTC m=+1470.694878326" watchObservedRunningTime="2026-02-27 16:48:28.557897147 +0000 UTC m=+1470.704911594" Feb 27 16:48:29 crc kubenswrapper[4751]: I0227 16:48:29.555453 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2ad24b50-556b-4799-a598-b7618c1664fd","Type":"ContainerStarted","Data":"784dd66d4d1ab2b759ae49b26768db9fa74a32ff3519e2e85b622211356464e8"} Feb 27 16:48:29 crc kubenswrapper[4751]: I0227 16:48:29.584530 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.584508192 podStartE2EDuration="3.584508192s" podCreationTimestamp="2026-02-27 16:48:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:48:29.573100897 +0000 UTC m=+1471.720115344" watchObservedRunningTime="2026-02-27 16:48:29.584508192 +0000 UTC m=+1471.731522639" Feb 27 16:48:35 crc kubenswrapper[4751]: I0227 16:48:35.628275 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"596283dd-65b4-4e12-aada-02133b846729","Type":"ContainerStarted","Data":"ce9634e3d58b7868a1d2448d3492ee99835767a8174dabdc958d53dfcef5785b"} Feb 27 16:48:35 crc kubenswrapper[4751]: I0227 16:48:35.628888 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 27 16:48:35 crc kubenswrapper[4751]: I0227 16:48:35.628391 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="596283dd-65b4-4e12-aada-02133b846729" containerName="proxy-httpd" containerID="cri-o://ce9634e3d58b7868a1d2448d3492ee99835767a8174dabdc958d53dfcef5785b" gracePeriod=30 Feb 27 16:48:35 crc kubenswrapper[4751]: I0227 16:48:35.628335 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="596283dd-65b4-4e12-aada-02133b846729" containerName="ceilometer-central-agent" containerID="cri-o://dd6fc2dfc7352ff22fee3d0450c46d9ee15988df0afc980bf3431b375003b46c" gracePeriod=30 Feb 27 16:48:35 crc kubenswrapper[4751]: I0227 16:48:35.628446 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="596283dd-65b4-4e12-aada-02133b846729" containerName="ceilometer-notification-agent" containerID="cri-o://3e58635380583d5984af4668f25c4d5a686648ddaf54a32a7d5d5acbb3d23426" gracePeriod=30 Feb 27 16:48:35 crc kubenswrapper[4751]: I0227 16:48:35.628490 4751 kuberuntime_container.go:808] 
"Killing container with a grace period" pod="openstack/ceilometer-0" podUID="596283dd-65b4-4e12-aada-02133b846729" containerName="sg-core" containerID="cri-o://5e9d13da4d7498f1d3beb27d630971361f8c5b0e5b9d41e463227a457f0630a4" gracePeriod=30 Feb 27 16:48:35 crc kubenswrapper[4751]: I0227 16:48:35.633600 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-mcjmb" event={"ID":"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241","Type":"ContainerStarted","Data":"1fd43654ff8fd38e1d4035a1687be06f7000402fb97029086ff1253d7093fa1c"} Feb 27 16:48:35 crc kubenswrapper[4751]: I0227 16:48:35.666771 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.458133447 podStartE2EDuration="11.666748406s" podCreationTimestamp="2026-02-27 16:48:24 +0000 UTC" firstStartedPulling="2026-02-27 16:48:25.4628454 +0000 UTC m=+1467.609859847" lastFinishedPulling="2026-02-27 16:48:34.671460359 +0000 UTC m=+1476.818474806" observedRunningTime="2026-02-27 16:48:35.652281499 +0000 UTC m=+1477.799295966" watchObservedRunningTime="2026-02-27 16:48:35.666748406 +0000 UTC m=+1477.813762873" Feb 27 16:48:35 crc kubenswrapper[4751]: I0227 16:48:35.676644 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-mcjmb" podStartSLOduration=2.66495183 podStartE2EDuration="12.67662792s" podCreationTimestamp="2026-02-27 16:48:23 +0000 UTC" firstStartedPulling="2026-02-27 16:48:24.672777396 +0000 UTC m=+1466.819791843" lastFinishedPulling="2026-02-27 16:48:34.684453486 +0000 UTC m=+1476.831467933" observedRunningTime="2026-02-27 16:48:35.673670371 +0000 UTC m=+1477.820684828" watchObservedRunningTime="2026-02-27 16:48:35.67662792 +0000 UTC m=+1477.823642367" Feb 27 16:48:35 crc kubenswrapper[4751]: I0227 16:48:35.796728 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Feb 27 16:48:35 crc kubenswrapper[4751]: I0227 16:48:35.796782 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Feb 27 16:48:35 crc kubenswrapper[4751]: I0227 16:48:35.837333 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Feb 27 16:48:35 crc kubenswrapper[4751]: I0227 16:48:35.839786 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Feb 27 16:48:36 crc kubenswrapper[4751]: I0227 16:48:36.665573 4751 generic.go:334] "Generic (PLEG): container finished" podID="596283dd-65b4-4e12-aada-02133b846729" containerID="ce9634e3d58b7868a1d2448d3492ee99835767a8174dabdc958d53dfcef5785b" exitCode=0 Feb 27 16:48:36 crc kubenswrapper[4751]: I0227 16:48:36.665887 4751 generic.go:334] "Generic (PLEG): container finished" podID="596283dd-65b4-4e12-aada-02133b846729" containerID="5e9d13da4d7498f1d3beb27d630971361f8c5b0e5b9d41e463227a457f0630a4" exitCode=2 Feb 27 16:48:36 crc kubenswrapper[4751]: I0227 16:48:36.665901 4751 generic.go:334] "Generic (PLEG): container finished" podID="596283dd-65b4-4e12-aada-02133b846729" containerID="3e58635380583d5984af4668f25c4d5a686648ddaf54a32a7d5d5acbb3d23426" exitCode=0 Feb 27 16:48:36 crc kubenswrapper[4751]: I0227 16:48:36.665912 4751 generic.go:334] "Generic (PLEG): container finished" podID="596283dd-65b4-4e12-aada-02133b846729" containerID="dd6fc2dfc7352ff22fee3d0450c46d9ee15988df0afc980bf3431b375003b46c" 
exitCode=0 Feb 27 16:48:36 crc kubenswrapper[4751]: I0227 16:48:36.666514 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"596283dd-65b4-4e12-aada-02133b846729","Type":"ContainerDied","Data":"ce9634e3d58b7868a1d2448d3492ee99835767a8174dabdc958d53dfcef5785b"} Feb 27 16:48:36 crc kubenswrapper[4751]: I0227 16:48:36.666566 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"596283dd-65b4-4e12-aada-02133b846729","Type":"ContainerDied","Data":"5e9d13da4d7498f1d3beb27d630971361f8c5b0e5b9d41e463227a457f0630a4"} Feb 27 16:48:36 crc kubenswrapper[4751]: I0227 16:48:36.666580 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"596283dd-65b4-4e12-aada-02133b846729","Type":"ContainerDied","Data":"3e58635380583d5984af4668f25c4d5a686648ddaf54a32a7d5d5acbb3d23426"} Feb 27 16:48:36 crc kubenswrapper[4751]: I0227 16:48:36.666592 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"596283dd-65b4-4e12-aada-02133b846729","Type":"ContainerDied","Data":"dd6fc2dfc7352ff22fee3d0450c46d9ee15988df0afc980bf3431b375003b46c"} Feb 27 16:48:36 crc kubenswrapper[4751]: I0227 16:48:36.667479 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Feb 27 16:48:36 crc kubenswrapper[4751]: I0227 16:48:36.667675 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Feb 27 16:48:36 crc kubenswrapper[4751]: I0227 16:48:36.861937 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.010482 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-combined-ca-bundle\") pod \"596283dd-65b4-4e12-aada-02133b846729\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.010553 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/596283dd-65b4-4e12-aada-02133b846729-log-httpd\") pod \"596283dd-65b4-4e12-aada-02133b846729\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.010580 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-sg-core-conf-yaml\") pod \"596283dd-65b4-4e12-aada-02133b846729\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.010681 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-scripts\") pod \"596283dd-65b4-4e12-aada-02133b846729\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.010718 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-config-data\") pod \"596283dd-65b4-4e12-aada-02133b846729\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.010747 4751 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lg6gl\" (UniqueName: \"kubernetes.io/projected/596283dd-65b4-4e12-aada-02133b846729-kube-api-access-lg6gl\") pod \"596283dd-65b4-4e12-aada-02133b846729\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.010778 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/596283dd-65b4-4e12-aada-02133b846729-run-httpd\") pod \"596283dd-65b4-4e12-aada-02133b846729\" (UID: \"596283dd-65b4-4e12-aada-02133b846729\") " Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.011142 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/596283dd-65b4-4e12-aada-02133b846729-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "596283dd-65b4-4e12-aada-02133b846729" (UID: "596283dd-65b4-4e12-aada-02133b846729"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.011528 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/596283dd-65b4-4e12-aada-02133b846729-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "596283dd-65b4-4e12-aada-02133b846729" (UID: "596283dd-65b4-4e12-aada-02133b846729"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.011567 4751 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/596283dd-65b4-4e12-aada-02133b846729-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.016139 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/596283dd-65b4-4e12-aada-02133b846729-kube-api-access-lg6gl" (OuterVolumeSpecName: "kube-api-access-lg6gl") pod "596283dd-65b4-4e12-aada-02133b846729" (UID: "596283dd-65b4-4e12-aada-02133b846729"). InnerVolumeSpecName "kube-api-access-lg6gl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.016593 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-scripts" (OuterVolumeSpecName: "scripts") pod "596283dd-65b4-4e12-aada-02133b846729" (UID: "596283dd-65b4-4e12-aada-02133b846729"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.039104 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "596283dd-65b4-4e12-aada-02133b846729" (UID: "596283dd-65b4-4e12-aada-02133b846729"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.104033 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "596283dd-65b4-4e12-aada-02133b846729" (UID: "596283dd-65b4-4e12-aada-02133b846729"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.110441 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-config-data" (OuterVolumeSpecName: "config-data") pod "596283dd-65b4-4e12-aada-02133b846729" (UID: "596283dd-65b4-4e12-aada-02133b846729"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.114327 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.114373 4751 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.114394 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.114435 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/596283dd-65b4-4e12-aada-02133b846729-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.114455 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lg6gl\" (UniqueName: \"kubernetes.io/projected/596283dd-65b4-4e12-aada-02133b846729-kube-api-access-lg6gl\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.114477 4751 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/596283dd-65b4-4e12-aada-02133b846729-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.251333 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.251378 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.366896 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.377669 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.681437 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"596283dd-65b4-4e12-aada-02133b846729","Type":"ContainerDied","Data":"3b0f96dda3c406e0b40fafb567684ced5edf6a139cdd53c7e8e12a629cf6d46c"} Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.681769 4751 scope.go:117] "RemoveContainer" containerID="ce9634e3d58b7868a1d2448d3492ee99835767a8174dabdc958d53dfcef5785b" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.681463 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.682639 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.682660 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.716577 4751 scope.go:117] "RemoveContainer" containerID="5e9d13da4d7498f1d3beb27d630971361f8c5b0e5b9d41e463227a457f0630a4" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.732209 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.745839 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.754782 4751 scope.go:117] "RemoveContainer" containerID="3e58635380583d5984af4668f25c4d5a686648ddaf54a32a7d5d5acbb3d23426" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.774773 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:48:37 crc kubenswrapper[4751]: E0227 16:48:37.775657 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="596283dd-65b4-4e12-aada-02133b846729" containerName="ceilometer-central-agent" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.775674 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="596283dd-65b4-4e12-aada-02133b846729" containerName="ceilometer-central-agent" Feb 27 16:48:37 crc kubenswrapper[4751]: E0227 16:48:37.775681 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="596283dd-65b4-4e12-aada-02133b846729" containerName="proxy-httpd" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.775687 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="596283dd-65b4-4e12-aada-02133b846729" containerName="proxy-httpd" Feb 27 16:48:37 crc kubenswrapper[4751]: E0227 16:48:37.775706 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="596283dd-65b4-4e12-aada-02133b846729" containerName="sg-core" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.775712 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="596283dd-65b4-4e12-aada-02133b846729" containerName="sg-core" Feb 27 16:48:37 crc kubenswrapper[4751]: E0227 16:48:37.775741 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="596283dd-65b4-4e12-aada-02133b846729" containerName="ceilometer-notification-agent" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.775747 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="596283dd-65b4-4e12-aada-02133b846729" containerName="ceilometer-notification-agent" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.775898 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="596283dd-65b4-4e12-aada-02133b846729" containerName="ceilometer-notification-agent" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.775918 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="596283dd-65b4-4e12-aada-02133b846729" containerName="proxy-httpd" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.775931 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="596283dd-65b4-4e12-aada-02133b846729" containerName="sg-core" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.775947 4751 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="596283dd-65b4-4e12-aada-02133b846729" containerName="ceilometer-central-agent" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.777457 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.780210 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.780561 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.800088 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.820205 4751 scope.go:117] "RemoveContainer" containerID="dd6fc2dfc7352ff22fee3d0450c46d9ee15988df0afc980bf3431b375003b46c" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.927511 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-config-data\") pod \"ceilometer-0\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " pod="openstack/ceilometer-0" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.927564 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gm8t7\" (UniqueName: \"kubernetes.io/projected/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-kube-api-access-gm8t7\") pod \"ceilometer-0\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " pod="openstack/ceilometer-0" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.927606 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-scripts\") pod \"ceilometer-0\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " pod="openstack/ceilometer-0" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.927658 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-run-httpd\") pod \"ceilometer-0\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " pod="openstack/ceilometer-0" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.927683 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " pod="openstack/ceilometer-0" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.927730 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-log-httpd\") pod \"ceilometer-0\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " pod="openstack/ceilometer-0" Feb 27 16:48:37 crc kubenswrapper[4751]: I0227 16:48:37.927780 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " pod="openstack/ceilometer-0" Feb 27 16:48:38 crc 
kubenswrapper[4751]: I0227 16:48:38.029768 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-config-data\") pod \"ceilometer-0\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " pod="openstack/ceilometer-0" Feb 27 16:48:38 crc kubenswrapper[4751]: I0227 16:48:38.029805 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gm8t7\" (UniqueName: \"kubernetes.io/projected/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-kube-api-access-gm8t7\") pod \"ceilometer-0\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " pod="openstack/ceilometer-0" Feb 27 16:48:38 crc kubenswrapper[4751]: I0227 16:48:38.029834 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-scripts\") pod \"ceilometer-0\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " pod="openstack/ceilometer-0" Feb 27 16:48:38 crc kubenswrapper[4751]: I0227 16:48:38.029868 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-run-httpd\") pod \"ceilometer-0\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " pod="openstack/ceilometer-0" Feb 27 16:48:38 crc kubenswrapper[4751]: I0227 16:48:38.029881 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " pod="openstack/ceilometer-0" Feb 27 16:48:38 crc kubenswrapper[4751]: I0227 16:48:38.029910 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-log-httpd\") pod \"ceilometer-0\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " pod="openstack/ceilometer-0" Feb 27 16:48:38 crc kubenswrapper[4751]: I0227 16:48:38.029943 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " pod="openstack/ceilometer-0" Feb 27 16:48:38 crc kubenswrapper[4751]: I0227 16:48:38.030641 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-run-httpd\") pod \"ceilometer-0\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " pod="openstack/ceilometer-0" Feb 27 16:48:38 crc kubenswrapper[4751]: I0227 16:48:38.030717 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-log-httpd\") pod \"ceilometer-0\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " pod="openstack/ceilometer-0" Feb 27 16:48:38 crc kubenswrapper[4751]: I0227 16:48:38.036738 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " pod="openstack/ceilometer-0" Feb 27 16:48:38 crc kubenswrapper[4751]: I0227 16:48:38.036911 4751 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-config-data\") pod \"ceilometer-0\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " pod="openstack/ceilometer-0" Feb 27 16:48:38 crc kubenswrapper[4751]: I0227 16:48:38.037576 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " pod="openstack/ceilometer-0" Feb 27 16:48:38 crc kubenswrapper[4751]: I0227 16:48:38.047351 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-scripts\") pod \"ceilometer-0\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " pod="openstack/ceilometer-0" Feb 27 16:48:38 crc kubenswrapper[4751]: I0227 16:48:38.067553 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gm8t7\" (UniqueName: \"kubernetes.io/projected/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-kube-api-access-gm8t7\") pod \"ceilometer-0\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " pod="openstack/ceilometer-0" Feb 27 16:48:38 crc kubenswrapper[4751]: I0227 16:48:38.123365 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:48:38 crc kubenswrapper[4751]: I0227 16:48:38.533673 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="596283dd-65b4-4e12-aada-02133b846729" path="/var/lib/kubelet/pods/596283dd-65b4-4e12-aada-02133b846729/volumes" Feb 27 16:48:38 crc kubenswrapper[4751]: I0227 16:48:38.614586 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:48:38 crc kubenswrapper[4751]: W0227 16:48:38.621058 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4fac02e2_b5d9_4724_9373_9d4a8cfb6085.slice/crio-0e508df2e04db5628330c13d5053447e29601d821d13385a99fdad6a2b406241 WatchSource:0}: Error finding container 0e508df2e04db5628330c13d5053447e29601d821d13385a99fdad6a2b406241: Status 404 returned error can't find the container with id 0e508df2e04db5628330c13d5053447e29601d821d13385a99fdad6a2b406241 Feb 27 16:48:38 crc kubenswrapper[4751]: I0227 16:48:38.696677 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4fac02e2-b5d9-4724-9373-9d4a8cfb6085","Type":"ContainerStarted","Data":"0e508df2e04db5628330c13d5053447e29601d821d13385a99fdad6a2b406241"} Feb 27 16:48:38 crc kubenswrapper[4751]: I0227 16:48:38.698593 4751 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 27 16:48:38 crc kubenswrapper[4751]: I0227 16:48:38.698613 4751 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 27 16:48:38 crc kubenswrapper[4751]: I0227 16:48:38.743495 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Feb 27 16:48:38 crc kubenswrapper[4751]: I0227 16:48:38.760799 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Feb 27 16:48:39 crc kubenswrapper[4751]: I0227 16:48:39.705560 4751 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 27 16:48:39 crc 
kubenswrapper[4751]: I0227 16:48:39.705921 4751 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 27 16:48:39 crc kubenswrapper[4751]: I0227 16:48:39.769170 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Feb 27 16:48:39 crc kubenswrapper[4751]: I0227 16:48:39.772920 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Feb 27 16:48:40 crc kubenswrapper[4751]: I0227 16:48:40.716259 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4fac02e2-b5d9-4724-9373-9d4a8cfb6085","Type":"ContainerStarted","Data":"4cf7a2d9a12cef1299160c21dbfd9bc0ba7da5c15089631692d9077f273ff42b"} Feb 27 16:48:43 crc kubenswrapper[4751]: I0227 16:48:43.751194 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4fac02e2-b5d9-4724-9373-9d4a8cfb6085","Type":"ContainerStarted","Data":"30d1ed286c5e7f6acf6d94fd57787d8e3d65cfcc1658b89c52c9c210f94c3b5d"} Feb 27 16:48:44 crc kubenswrapper[4751]: I0227 16:48:44.394118 4751 scope.go:117] "RemoveContainer" containerID="2d396d144fb1623ac36f7774b720c3e6a1114543bd6fe318832210eda9c71045" Feb 27 16:48:44 crc kubenswrapper[4751]: I0227 16:48:44.765676 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4fac02e2-b5d9-4724-9373-9d4a8cfb6085","Type":"ContainerStarted","Data":"a1352fe5724f9dac0d6b0b3c713cc252ccccef71c2efd3bbef442b32ecdf74de"} Feb 27 16:48:46 crc kubenswrapper[4751]: I0227 16:48:46.801134 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4fac02e2-b5d9-4724-9373-9d4a8cfb6085","Type":"ContainerStarted","Data":"06cf6289d52238a58e1c3bc175e7c6fcaf1845a19273779711aa9170eff34576"} Feb 27 16:48:46 crc kubenswrapper[4751]: I0227 16:48:46.801635 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 27 16:48:46 crc kubenswrapper[4751]: I0227 16:48:46.824415 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.869629867 podStartE2EDuration="9.824379269s" podCreationTimestamp="2026-02-27 16:48:37 +0000 UTC" firstStartedPulling="2026-02-27 16:48:38.623539361 +0000 UTC m=+1480.770553798" lastFinishedPulling="2026-02-27 16:48:45.578288743 +0000 UTC m=+1487.725303200" observedRunningTime="2026-02-27 16:48:46.817842174 +0000 UTC m=+1488.964856651" watchObservedRunningTime="2026-02-27 16:48:46.824379269 +0000 UTC m=+1488.971393716" Feb 27 16:48:48 crc kubenswrapper[4751]: I0227 16:48:48.827334 4751 generic.go:334] "Generic (PLEG): container finished" podID="2fa0f5e2-5ce2-49ca-8873-4ef71aac0241" containerID="1fd43654ff8fd38e1d4035a1687be06f7000402fb97029086ff1253d7093fa1c" exitCode=0 Feb 27 16:48:48 crc kubenswrapper[4751]: I0227 16:48:48.827476 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-mcjmb" event={"ID":"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241","Type":"ContainerDied","Data":"1fd43654ff8fd38e1d4035a1687be06f7000402fb97029086ff1253d7093fa1c"} Feb 27 16:48:50 crc kubenswrapper[4751]: I0227 16:48:50.236457 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-mcjmb" Feb 27 16:48:50 crc kubenswrapper[4751]: I0227 16:48:50.392469 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-combined-ca-bundle\") pod \"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241\" (UID: \"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241\") " Feb 27 16:48:50 crc kubenswrapper[4751]: I0227 16:48:50.392569 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jcn7p\" (UniqueName: \"kubernetes.io/projected/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-kube-api-access-jcn7p\") pod \"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241\" (UID: \"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241\") " Feb 27 16:48:50 crc kubenswrapper[4751]: I0227 16:48:50.392675 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-scripts\") pod \"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241\" (UID: \"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241\") " Feb 27 16:48:50 crc kubenswrapper[4751]: I0227 16:48:50.392706 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-config-data\") pod \"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241\" (UID: \"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241\") " Feb 27 16:48:50 crc kubenswrapper[4751]: I0227 16:48:50.398834 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-kube-api-access-jcn7p" (OuterVolumeSpecName: "kube-api-access-jcn7p") pod "2fa0f5e2-5ce2-49ca-8873-4ef71aac0241" (UID: "2fa0f5e2-5ce2-49ca-8873-4ef71aac0241"). InnerVolumeSpecName "kube-api-access-jcn7p". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:48:50 crc kubenswrapper[4751]: I0227 16:48:50.399647 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-scripts" (OuterVolumeSpecName: "scripts") pod "2fa0f5e2-5ce2-49ca-8873-4ef71aac0241" (UID: "2fa0f5e2-5ce2-49ca-8873-4ef71aac0241"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:48:50 crc kubenswrapper[4751]: I0227 16:48:50.419991 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2fa0f5e2-5ce2-49ca-8873-4ef71aac0241" (UID: "2fa0f5e2-5ce2-49ca-8873-4ef71aac0241"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:48:50 crc kubenswrapper[4751]: I0227 16:48:50.446587 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-config-data" (OuterVolumeSpecName: "config-data") pod "2fa0f5e2-5ce2-49ca-8873-4ef71aac0241" (UID: "2fa0f5e2-5ce2-49ca-8873-4ef71aac0241"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:48:50 crc kubenswrapper[4751]: I0227 16:48:50.494787 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:50 crc kubenswrapper[4751]: I0227 16:48:50.494820 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:50 crc kubenswrapper[4751]: I0227 16:48:50.494834 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:50 crc kubenswrapper[4751]: I0227 16:48:50.494846 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jcn7p\" (UniqueName: \"kubernetes.io/projected/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241-kube-api-access-jcn7p\") on node \"crc\" DevicePath \"\"" Feb 27 16:48:50 crc kubenswrapper[4751]: I0227 16:48:50.850777 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-mcjmb" event={"ID":"2fa0f5e2-5ce2-49ca-8873-4ef71aac0241","Type":"ContainerDied","Data":"128c04a25993ea107d641ea522d3b1d4424890eca15bbd46a467cfcf68966f29"} Feb 27 16:48:50 crc kubenswrapper[4751]: I0227 16:48:50.850835 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="128c04a25993ea107d641ea522d3b1d4424890eca15bbd46a467cfcf68966f29" Feb 27 16:48:50 crc kubenswrapper[4751]: I0227 16:48:50.850909 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-mcjmb" Feb 27 16:48:51 crc kubenswrapper[4751]: I0227 16:48:51.009657 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Feb 27 16:48:51 crc kubenswrapper[4751]: E0227 16:48:51.010104 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fa0f5e2-5ce2-49ca-8873-4ef71aac0241" containerName="nova-cell0-conductor-db-sync" Feb 27 16:48:51 crc kubenswrapper[4751]: I0227 16:48:51.010124 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fa0f5e2-5ce2-49ca-8873-4ef71aac0241" containerName="nova-cell0-conductor-db-sync" Feb 27 16:48:51 crc kubenswrapper[4751]: I0227 16:48:51.010374 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fa0f5e2-5ce2-49ca-8873-4ef71aac0241" containerName="nova-cell0-conductor-db-sync" Feb 27 16:48:51 crc kubenswrapper[4751]: I0227 16:48:51.011353 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Feb 27 16:48:51 crc kubenswrapper[4751]: I0227 16:48:51.014098 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Feb 27 16:48:51 crc kubenswrapper[4751]: I0227 16:48:51.020182 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-kd2ms" Feb 27 16:48:51 crc kubenswrapper[4751]: I0227 16:48:51.027572 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Feb 27 16:48:51 crc kubenswrapper[4751]: I0227 16:48:51.207584 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4wwb\" (UniqueName: \"kubernetes.io/projected/f501d880-21be-44e3-b015-05b79e226279-kube-api-access-x4wwb\") pod \"nova-cell0-conductor-0\" (UID: \"f501d880-21be-44e3-b015-05b79e226279\") " pod="openstack/nova-cell0-conductor-0" Feb 27 16:48:51 crc kubenswrapper[4751]: I0227 16:48:51.207706 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f501d880-21be-44e3-b015-05b79e226279-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f501d880-21be-44e3-b015-05b79e226279\") " pod="openstack/nova-cell0-conductor-0" Feb 27 16:48:51 crc kubenswrapper[4751]: I0227 16:48:51.207814 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f501d880-21be-44e3-b015-05b79e226279-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"f501d880-21be-44e3-b015-05b79e226279\") " pod="openstack/nova-cell0-conductor-0" Feb 27 16:48:51 crc kubenswrapper[4751]: I0227 16:48:51.309707 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f501d880-21be-44e3-b015-05b79e226279-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"f501d880-21be-44e3-b015-05b79e226279\") " pod="openstack/nova-cell0-conductor-0" Feb 27 16:48:51 crc kubenswrapper[4751]: I0227 16:48:51.309793 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4wwb\" (UniqueName: \"kubernetes.io/projected/f501d880-21be-44e3-b015-05b79e226279-kube-api-access-x4wwb\") pod \"nova-cell0-conductor-0\" (UID: \"f501d880-21be-44e3-b015-05b79e226279\") " pod="openstack/nova-cell0-conductor-0" Feb 27 16:48:51 crc kubenswrapper[4751]: I0227 16:48:51.309844 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f501d880-21be-44e3-b015-05b79e226279-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f501d880-21be-44e3-b015-05b79e226279\") " pod="openstack/nova-cell0-conductor-0" Feb 27 16:48:51 crc kubenswrapper[4751]: I0227 16:48:51.318884 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f501d880-21be-44e3-b015-05b79e226279-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f501d880-21be-44e3-b015-05b79e226279\") " pod="openstack/nova-cell0-conductor-0" Feb 27 16:48:51 crc kubenswrapper[4751]: I0227 16:48:51.318885 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f501d880-21be-44e3-b015-05b79e226279-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" 
(UID: \"f501d880-21be-44e3-b015-05b79e226279\") " pod="openstack/nova-cell0-conductor-0" Feb 27 16:48:51 crc kubenswrapper[4751]: I0227 16:48:51.326054 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4wwb\" (UniqueName: \"kubernetes.io/projected/f501d880-21be-44e3-b015-05b79e226279-kube-api-access-x4wwb\") pod \"nova-cell0-conductor-0\" (UID: \"f501d880-21be-44e3-b015-05b79e226279\") " pod="openstack/nova-cell0-conductor-0" Feb 27 16:48:51 crc kubenswrapper[4751]: I0227 16:48:51.331422 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Feb 27 16:48:51 crc kubenswrapper[4751]: I0227 16:48:51.764399 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Feb 27 16:48:51 crc kubenswrapper[4751]: W0227 16:48:51.769561 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf501d880_21be_44e3_b015_05b79e226279.slice/crio-6854150bcfe69bab52c313f36b167edc2ea7852a5af9744ef353f572a9de7103 WatchSource:0}: Error finding container 6854150bcfe69bab52c313f36b167edc2ea7852a5af9744ef353f572a9de7103: Status 404 returned error can't find the container with id 6854150bcfe69bab52c313f36b167edc2ea7852a5af9744ef353f572a9de7103 Feb 27 16:48:51 crc kubenswrapper[4751]: I0227 16:48:51.863697 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"f501d880-21be-44e3-b015-05b79e226279","Type":"ContainerStarted","Data":"6854150bcfe69bab52c313f36b167edc2ea7852a5af9744ef353f572a9de7103"} Feb 27 16:48:52 crc kubenswrapper[4751]: I0227 16:48:52.877300 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"f501d880-21be-44e3-b015-05b79e226279","Type":"ContainerStarted","Data":"da16950bef4c8761589f066404058c19ec5943530e74d8711c9cc18e707be9f5"} Feb 27 16:48:52 crc kubenswrapper[4751]: I0227 16:48:52.877692 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Feb 27 16:48:52 crc kubenswrapper[4751]: I0227 16:48:52.901556 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.901520884 podStartE2EDuration="2.901520884s" podCreationTimestamp="2026-02-27 16:48:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:48:52.900475746 +0000 UTC m=+1495.047490203" watchObservedRunningTime="2026-02-27 16:48:52.901520884 +0000 UTC m=+1495.048535331" Feb 27 16:48:56 crc kubenswrapper[4751]: I0227 16:48:56.358133 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Feb 27 16:48:56 crc kubenswrapper[4751]: I0227 16:48:56.974269 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-tfsfv"] Feb 27 16:48:56 crc kubenswrapper[4751]: I0227 16:48:56.975340 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-tfsfv" Feb 27 16:48:56 crc kubenswrapper[4751]: I0227 16:48:56.978734 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Feb 27 16:48:56 crc kubenswrapper[4751]: I0227 16:48:56.979487 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Feb 27 16:48:56 crc kubenswrapper[4751]: I0227 16:48:56.998256 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-tfsfv"] Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.109785 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.111586 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.114964 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.122739 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9zj5\" (UniqueName: \"kubernetes.io/projected/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-kube-api-access-m9zj5\") pod \"nova-cell0-cell-mapping-tfsfv\" (UID: \"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae\") " pod="openstack/nova-cell0-cell-mapping-tfsfv" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.122804 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-config-data\") pod \"nova-cell0-cell-mapping-tfsfv\" (UID: \"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae\") " pod="openstack/nova-cell0-cell-mapping-tfsfv" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.122830 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-tfsfv\" (UID: \"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae\") " pod="openstack/nova-cell0-cell-mapping-tfsfv" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.122853 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-scripts\") pod \"nova-cell0-cell-mapping-tfsfv\" (UID: \"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae\") " pod="openstack/nova-cell0-cell-mapping-tfsfv" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.138663 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.223912 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9zj5\" (UniqueName: \"kubernetes.io/projected/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-kube-api-access-m9zj5\") pod \"nova-cell0-cell-mapping-tfsfv\" (UID: \"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae\") " pod="openstack/nova-cell0-cell-mapping-tfsfv" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.224185 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kxc6\" (UniqueName: \"kubernetes.io/projected/3bdb66cf-24f8-46e3-baef-dc1ef718e027-kube-api-access-4kxc6\") 
pod \"nova-scheduler-0\" (UID: \"3bdb66cf-24f8-46e3-baef-dc1ef718e027\") " pod="openstack/nova-scheduler-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.224279 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-config-data\") pod \"nova-cell0-cell-mapping-tfsfv\" (UID: \"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae\") " pod="openstack/nova-cell0-cell-mapping-tfsfv" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.224365 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-tfsfv\" (UID: \"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae\") " pod="openstack/nova-cell0-cell-mapping-tfsfv" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.224519 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-scripts\") pod \"nova-cell0-cell-mapping-tfsfv\" (UID: \"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae\") " pod="openstack/nova-cell0-cell-mapping-tfsfv" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.224681 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3bdb66cf-24f8-46e3-baef-dc1ef718e027-config-data\") pod \"nova-scheduler-0\" (UID: \"3bdb66cf-24f8-46e3-baef-dc1ef718e027\") " pod="openstack/nova-scheduler-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.224764 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bdb66cf-24f8-46e3-baef-dc1ef718e027-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3bdb66cf-24f8-46e3-baef-dc1ef718e027\") " pod="openstack/nova-scheduler-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.229790 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-config-data\") pod \"nova-cell0-cell-mapping-tfsfv\" (UID: \"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae\") " pod="openstack/nova-cell0-cell-mapping-tfsfv" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.232025 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-scripts\") pod \"nova-cell0-cell-mapping-tfsfv\" (UID: \"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae\") " pod="openstack/nova-cell0-cell-mapping-tfsfv" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.233081 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-tfsfv\" (UID: \"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae\") " pod="openstack/nova-cell0-cell-mapping-tfsfv" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.249728 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9zj5\" (UniqueName: \"kubernetes.io/projected/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-kube-api-access-m9zj5\") pod \"nova-cell0-cell-mapping-tfsfv\" (UID: \"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae\") " pod="openstack/nova-cell0-cell-mapping-tfsfv" Feb 
27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.265964 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.267376 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.269762 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.278210 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.279307 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.285379 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.294564 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.298588 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-tfsfv" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.310982 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.327004 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kxc6\" (UniqueName: \"kubernetes.io/projected/3bdb66cf-24f8-46e3-baef-dc1ef718e027-kube-api-access-4kxc6\") pod \"nova-scheduler-0\" (UID: \"3bdb66cf-24f8-46e3-baef-dc1ef718e027\") " pod="openstack/nova-scheduler-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.327127 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3bdb66cf-24f8-46e3-baef-dc1ef718e027-config-data\") pod \"nova-scheduler-0\" (UID: \"3bdb66cf-24f8-46e3-baef-dc1ef718e027\") " pod="openstack/nova-scheduler-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.327152 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bdb66cf-24f8-46e3-baef-dc1ef718e027-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3bdb66cf-24f8-46e3-baef-dc1ef718e027\") " pod="openstack/nova-scheduler-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.335155 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3bdb66cf-24f8-46e3-baef-dc1ef718e027-config-data\") pod \"nova-scheduler-0\" (UID: \"3bdb66cf-24f8-46e3-baef-dc1ef718e027\") " pod="openstack/nova-scheduler-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.352013 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bdb66cf-24f8-46e3-baef-dc1ef718e027-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3bdb66cf-24f8-46e3-baef-dc1ef718e027\") " pod="openstack/nova-scheduler-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.374866 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kxc6\" (UniqueName: \"kubernetes.io/projected/3bdb66cf-24f8-46e3-baef-dc1ef718e027-kube-api-access-4kxc6\") pod 
\"nova-scheduler-0\" (UID: \"3bdb66cf-24f8-46e3-baef-dc1ef718e027\") " pod="openstack/nova-scheduler-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.415819 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.424264 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.428768 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.429243 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.429868 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghplk\" (UniqueName: \"kubernetes.io/projected/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-kube-api-access-ghplk\") pod \"nova-api-0\" (UID: \"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2\") " pod="openstack/nova-api-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.429920 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-logs\") pod \"nova-api-0\" (UID: \"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2\") " pod="openstack/nova-api-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.429962 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-config-data\") pod \"nova-api-0\" (UID: \"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2\") " pod="openstack/nova-api-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.429990 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vx4j\" (UniqueName: \"kubernetes.io/projected/1f64c674-22f4-48b5-ba58-d602b0c8a213-kube-api-access-6vx4j\") pod \"nova-cell1-novncproxy-0\" (UID: \"1f64c674-22f4-48b5-ba58-d602b0c8a213\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.430013 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f64c674-22f4-48b5-ba58-d602b0c8a213-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"1f64c674-22f4-48b5-ba58-d602b0c8a213\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.430045 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f64c674-22f4-48b5-ba58-d602b0c8a213-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"1f64c674-22f4-48b5-ba58-d602b0c8a213\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.430061 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2\") " pod="openstack/nova-api-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.464936 4751 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.534169 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4x4z\" (UniqueName: \"kubernetes.io/projected/a68dc9ec-3c77-43bc-af2d-69401752dad6-kube-api-access-d4x4z\") pod \"nova-metadata-0\" (UID: \"a68dc9ec-3c77-43bc-af2d-69401752dad6\") " pod="openstack/nova-metadata-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.534212 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a68dc9ec-3c77-43bc-af2d-69401752dad6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a68dc9ec-3c77-43bc-af2d-69401752dad6\") " pod="openstack/nova-metadata-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.534247 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghplk\" (UniqueName: \"kubernetes.io/projected/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-kube-api-access-ghplk\") pod \"nova-api-0\" (UID: \"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2\") " pod="openstack/nova-api-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.534303 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-logs\") pod \"nova-api-0\" (UID: \"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2\") " pod="openstack/nova-api-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.534360 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-config-data\") pod \"nova-api-0\" (UID: \"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2\") " pod="openstack/nova-api-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.534411 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vx4j\" (UniqueName: \"kubernetes.io/projected/1f64c674-22f4-48b5-ba58-d602b0c8a213-kube-api-access-6vx4j\") pod \"nova-cell1-novncproxy-0\" (UID: \"1f64c674-22f4-48b5-ba58-d602b0c8a213\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.534438 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f64c674-22f4-48b5-ba58-d602b0c8a213-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"1f64c674-22f4-48b5-ba58-d602b0c8a213\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.534479 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a68dc9ec-3c77-43bc-af2d-69401752dad6-logs\") pod \"nova-metadata-0\" (UID: \"a68dc9ec-3c77-43bc-af2d-69401752dad6\") " pod="openstack/nova-metadata-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.534515 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f64c674-22f4-48b5-ba58-d602b0c8a213-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"1f64c674-22f4-48b5-ba58-d602b0c8a213\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.534537 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2\") " pod="openstack/nova-api-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.535740 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a68dc9ec-3c77-43bc-af2d-69401752dad6-config-data\") pod \"nova-metadata-0\" (UID: \"a68dc9ec-3c77-43bc-af2d-69401752dad6\") " pod="openstack/nova-metadata-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.542173 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-logs\") pod \"nova-api-0\" (UID: \"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2\") " pod="openstack/nova-api-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.550127 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f64c674-22f4-48b5-ba58-d602b0c8a213-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"1f64c674-22f4-48b5-ba58-d602b0c8a213\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.562343 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-h8q5b"] Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.575152 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.578351 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghplk\" (UniqueName: \"kubernetes.io/projected/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-kube-api-access-ghplk\") pod \"nova-api-0\" (UID: \"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2\") " pod="openstack/nova-api-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.578452 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-config-data\") pod \"nova-api-0\" (UID: \"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2\") " pod="openstack/nova-api-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.582230 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f64c674-22f4-48b5-ba58-d602b0c8a213-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"1f64c674-22f4-48b5-ba58-d602b0c8a213\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.586653 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vx4j\" (UniqueName: \"kubernetes.io/projected/1f64c674-22f4-48b5-ba58-d602b0c8a213-kube-api-access-6vx4j\") pod \"nova-cell1-novncproxy-0\" (UID: \"1f64c674-22f4-48b5-ba58-d602b0c8a213\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.599858 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2\") " pod="openstack/nova-api-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.605288 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/dnsmasq-dns-757b4f8459-h8q5b"] Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.639125 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a68dc9ec-3c77-43bc-af2d-69401752dad6-logs\") pod \"nova-metadata-0\" (UID: \"a68dc9ec-3c77-43bc-af2d-69401752dad6\") " pod="openstack/nova-metadata-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.639596 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a68dc9ec-3c77-43bc-af2d-69401752dad6-config-data\") pod \"nova-metadata-0\" (UID: \"a68dc9ec-3c77-43bc-af2d-69401752dad6\") " pod="openstack/nova-metadata-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.639772 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4x4z\" (UniqueName: \"kubernetes.io/projected/a68dc9ec-3c77-43bc-af2d-69401752dad6-kube-api-access-d4x4z\") pod \"nova-metadata-0\" (UID: \"a68dc9ec-3c77-43bc-af2d-69401752dad6\") " pod="openstack/nova-metadata-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.639848 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a68dc9ec-3c77-43bc-af2d-69401752dad6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a68dc9ec-3c77-43bc-af2d-69401752dad6\") " pod="openstack/nova-metadata-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.641559 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a68dc9ec-3c77-43bc-af2d-69401752dad6-logs\") pod \"nova-metadata-0\" (UID: \"a68dc9ec-3c77-43bc-af2d-69401752dad6\") " pod="openstack/nova-metadata-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.657885 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a68dc9ec-3c77-43bc-af2d-69401752dad6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a68dc9ec-3c77-43bc-af2d-69401752dad6\") " pod="openstack/nova-metadata-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.657914 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a68dc9ec-3c77-43bc-af2d-69401752dad6-config-data\") pod \"nova-metadata-0\" (UID: \"a68dc9ec-3c77-43bc-af2d-69401752dad6\") " pod="openstack/nova-metadata-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.669771 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4x4z\" (UniqueName: \"kubernetes.io/projected/a68dc9ec-3c77-43bc-af2d-69401752dad6-kube-api-access-d4x4z\") pod \"nova-metadata-0\" (UID: \"a68dc9ec-3c77-43bc-af2d-69401752dad6\") " pod="openstack/nova-metadata-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.745750 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-h8q5b\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.745844 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6r5xd\" (UniqueName: 
\"kubernetes.io/projected/e4c57506-90e1-43e2-afff-4038aedef2b4-kube-api-access-6r5xd\") pod \"dnsmasq-dns-757b4f8459-h8q5b\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.745900 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-h8q5b\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.745922 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-ovsdbserver-sb\") pod \"dnsmasq-dns-757b4f8459-h8q5b\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.745952 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-config\") pod \"dnsmasq-dns-757b4f8459-h8q5b\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.745980 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-dns-svc\") pod \"dnsmasq-dns-757b4f8459-h8q5b\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.768601 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.790540 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.859001 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-h8q5b\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.859132 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6r5xd\" (UniqueName: \"kubernetes.io/projected/e4c57506-90e1-43e2-afff-4038aedef2b4-kube-api-access-6r5xd\") pod \"dnsmasq-dns-757b4f8459-h8q5b\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.859220 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-h8q5b\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.859241 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-ovsdbserver-sb\") pod \"dnsmasq-dns-757b4f8459-h8q5b\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.859270 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-config\") pod \"dnsmasq-dns-757b4f8459-h8q5b\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.859306 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-dns-svc\") pod \"dnsmasq-dns-757b4f8459-h8q5b\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.860163 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-dns-svc\") pod \"dnsmasq-dns-757b4f8459-h8q5b\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.860247 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-h8q5b\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.860735 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-ovsdbserver-sb\") pod \"dnsmasq-dns-757b4f8459-h8q5b\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:48:57 crc 
kubenswrapper[4751]: I0227 16:48:57.860847 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-h8q5b\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.861243 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-config\") pod \"dnsmasq-dns-757b4f8459-h8q5b\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.882764 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6r5xd\" (UniqueName: \"kubernetes.io/projected/e4c57506-90e1-43e2-afff-4038aedef2b4-kube-api-access-6r5xd\") pod \"dnsmasq-dns-757b4f8459-h8q5b\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.944926 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 27 16:48:57 crc kubenswrapper[4751]: I0227 16:48:57.994847 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.046429 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-d6fpf"] Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.053560 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-d6fpf" Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.056484 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.056633 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.068818 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-tfsfv"] Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.082106 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-d6fpf"] Feb 27 16:48:58 crc kubenswrapper[4751]: W0227 16:48:58.098619 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4e28eb4_4e14_4cc7_b2be_a5cce68dfeae.slice/crio-1e05459afa03625e8d7eefd117f6947210235c7970492bfcf64bd9f203bd2f58 WatchSource:0}: Error finding container 1e05459afa03625e8d7eefd117f6947210235c7970492bfcf64bd9f203bd2f58: Status 404 returned error can't find the container with id 1e05459afa03625e8d7eefd117f6947210235c7970492bfcf64bd9f203bd2f58 Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.167539 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcmbb\" (UniqueName: \"kubernetes.io/projected/25f3f7ef-8fae-4ae3-812c-27d2fb474723-kube-api-access-bcmbb\") pod \"nova-cell1-conductor-db-sync-d6fpf\" (UID: \"25f3f7ef-8fae-4ae3-812c-27d2fb474723\") " pod="openstack/nova-cell1-conductor-db-sync-d6fpf" Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 
16:48:58.167639 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25f3f7ef-8fae-4ae3-812c-27d2fb474723-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-d6fpf\" (UID: \"25f3f7ef-8fae-4ae3-812c-27d2fb474723\") " pod="openstack/nova-cell1-conductor-db-sync-d6fpf" Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.167722 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/25f3f7ef-8fae-4ae3-812c-27d2fb474723-scripts\") pod \"nova-cell1-conductor-db-sync-d6fpf\" (UID: \"25f3f7ef-8fae-4ae3-812c-27d2fb474723\") " pod="openstack/nova-cell1-conductor-db-sync-d6fpf" Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.167913 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25f3f7ef-8fae-4ae3-812c-27d2fb474723-config-data\") pod \"nova-cell1-conductor-db-sync-d6fpf\" (UID: \"25f3f7ef-8fae-4ae3-812c-27d2fb474723\") " pod="openstack/nova-cell1-conductor-db-sync-d6fpf" Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.194357 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.269642 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25f3f7ef-8fae-4ae3-812c-27d2fb474723-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-d6fpf\" (UID: \"25f3f7ef-8fae-4ae3-812c-27d2fb474723\") " pod="openstack/nova-cell1-conductor-db-sync-d6fpf" Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.269792 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/25f3f7ef-8fae-4ae3-812c-27d2fb474723-scripts\") pod \"nova-cell1-conductor-db-sync-d6fpf\" (UID: \"25f3f7ef-8fae-4ae3-812c-27d2fb474723\") " pod="openstack/nova-cell1-conductor-db-sync-d6fpf" Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.269839 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25f3f7ef-8fae-4ae3-812c-27d2fb474723-config-data\") pod \"nova-cell1-conductor-db-sync-d6fpf\" (UID: \"25f3f7ef-8fae-4ae3-812c-27d2fb474723\") " pod="openstack/nova-cell1-conductor-db-sync-d6fpf" Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.269869 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcmbb\" (UniqueName: \"kubernetes.io/projected/25f3f7ef-8fae-4ae3-812c-27d2fb474723-kube-api-access-bcmbb\") pod \"nova-cell1-conductor-db-sync-d6fpf\" (UID: \"25f3f7ef-8fae-4ae3-812c-27d2fb474723\") " pod="openstack/nova-cell1-conductor-db-sync-d6fpf" Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.274121 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/25f3f7ef-8fae-4ae3-812c-27d2fb474723-scripts\") pod \"nova-cell1-conductor-db-sync-d6fpf\" (UID: \"25f3f7ef-8fae-4ae3-812c-27d2fb474723\") " pod="openstack/nova-cell1-conductor-db-sync-d6fpf" Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.274768 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/25f3f7ef-8fae-4ae3-812c-27d2fb474723-config-data\") pod \"nova-cell1-conductor-db-sync-d6fpf\" (UID: \"25f3f7ef-8fae-4ae3-812c-27d2fb474723\") " pod="openstack/nova-cell1-conductor-db-sync-d6fpf" Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.275552 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25f3f7ef-8fae-4ae3-812c-27d2fb474723-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-d6fpf\" (UID: \"25f3f7ef-8fae-4ae3-812c-27d2fb474723\") " pod="openstack/nova-cell1-conductor-db-sync-d6fpf" Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.288442 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bcmbb\" (UniqueName: \"kubernetes.io/projected/25f3f7ef-8fae-4ae3-812c-27d2fb474723-kube-api-access-bcmbb\") pod \"nova-cell1-conductor-db-sync-d6fpf\" (UID: \"25f3f7ef-8fae-4ae3-812c-27d2fb474723\") " pod="openstack/nova-cell1-conductor-db-sync-d6fpf" Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.361725 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.386833 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.387298 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-d6fpf" Feb 27 16:48:58 crc kubenswrapper[4751]: W0227 16:48:58.406555 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0f4b9484_da3d_4fa7_9c92_a64df3aa7ec2.slice/crio-e6889a17adea8cdfcd5b73e243618e2b40ee14506095d54fc307ff19938e8907 WatchSource:0}: Error finding container e6889a17adea8cdfcd5b73e243618e2b40ee14506095d54fc307ff19938e8907: Status 404 returned error can't find the container with id e6889a17adea8cdfcd5b73e243618e2b40ee14506095d54fc307ff19938e8907 Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.423672 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.693127 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-h8q5b"] Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.918881 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.919239 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.943577 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"1f64c674-22f4-48b5-ba58-d602b0c8a213","Type":"ContainerStarted","Data":"e2637e4652b2fc035f9fe72ffdbddd1e4eb824ed6bd35d681c5c173686468b15"} Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.950862 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-scheduler-0" event={"ID":"3bdb66cf-24f8-46e3-baef-dc1ef718e027","Type":"ContainerStarted","Data":"0072f04ddb27be64a9d020f3216d5866d0114dbbd5ba96827cccfd16677060a4"} Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.951331 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-d6fpf"] Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.953109 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a68dc9ec-3c77-43bc-af2d-69401752dad6","Type":"ContainerStarted","Data":"f3f5bf450a40d9932d15076b95f544b91f225b1fa33b6b53b500778633c55d73"} Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.956286 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-tfsfv" event={"ID":"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae","Type":"ContainerStarted","Data":"a4d187eda36ea4cc354f922213b9dd6fed260eafa6826d3e2f70a01152740544"} Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.956334 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-tfsfv" event={"ID":"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae","Type":"ContainerStarted","Data":"1e05459afa03625e8d7eefd117f6947210235c7970492bfcf64bd9f203bd2f58"} Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.959038 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2","Type":"ContainerStarted","Data":"e6889a17adea8cdfcd5b73e243618e2b40ee14506095d54fc307ff19938e8907"} Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.961599 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" event={"ID":"e4c57506-90e1-43e2-afff-4038aedef2b4","Type":"ContainerStarted","Data":"4bb8ac2965ea3e27832499f995e8f44fa9f39689a1f9ce5a104eacd91f6952b6"} Feb 27 16:48:58 crc kubenswrapper[4751]: I0227 16:48:58.982187 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-tfsfv" podStartSLOduration=2.982168532 podStartE2EDuration="2.982168532s" podCreationTimestamp="2026-02-27 16:48:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:48:58.972509724 +0000 UTC m=+1501.119524171" watchObservedRunningTime="2026-02-27 16:48:58.982168532 +0000 UTC m=+1501.129182989" Feb 27 16:48:59 crc kubenswrapper[4751]: I0227 16:48:59.973690 4751 generic.go:334] "Generic (PLEG): container finished" podID="e4c57506-90e1-43e2-afff-4038aedef2b4" containerID="ecfd9155e07dd397cfc455eda01a9ead67a4d87f9814318b0df5b367d29e82d3" exitCode=0 Feb 27 16:48:59 crc kubenswrapper[4751]: I0227 16:48:59.973761 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" event={"ID":"e4c57506-90e1-43e2-afff-4038aedef2b4","Type":"ContainerDied","Data":"ecfd9155e07dd397cfc455eda01a9ead67a4d87f9814318b0df5b367d29e82d3"} Feb 27 16:48:59 crc kubenswrapper[4751]: I0227 16:48:59.976510 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-d6fpf" event={"ID":"25f3f7ef-8fae-4ae3-812c-27d2fb474723","Type":"ContainerStarted","Data":"4d65bcc508221ab8e89426f199f38452c9ba3344c8dcef725f5e51974856df60"} Feb 27 16:48:59 crc kubenswrapper[4751]: I0227 16:48:59.976590 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-d6fpf" 
event={"ID":"25f3f7ef-8fae-4ae3-812c-27d2fb474723","Type":"ContainerStarted","Data":"8207ee33bf9e525ede03a6c973b18c4ce99d7f9f324f138dcf41e0703e0ff887"} Feb 27 16:49:00 crc kubenswrapper[4751]: I0227 16:49:00.017630 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-d6fpf" podStartSLOduration=2.017608754 podStartE2EDuration="2.017608754s" podCreationTimestamp="2026-02-27 16:48:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:49:00.008719627 +0000 UTC m=+1502.155734074" watchObservedRunningTime="2026-02-27 16:49:00.017608754 +0000 UTC m=+1502.164623211" Feb 27 16:49:01 crc kubenswrapper[4751]: I0227 16:49:01.082492 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:49:01 crc kubenswrapper[4751]: I0227 16:49:01.164329 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 27 16:49:01 crc kubenswrapper[4751]: I0227 16:49:01.995771 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3bdb66cf-24f8-46e3-baef-dc1ef718e027","Type":"ContainerStarted","Data":"03351db41ca8520163d1679db83c712b3cbcb947ebc93b3da6924d9970f1a30b"} Feb 27 16:49:02 crc kubenswrapper[4751]: I0227 16:49:02.000764 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a68dc9ec-3c77-43bc-af2d-69401752dad6","Type":"ContainerStarted","Data":"0bfb8c164c908a470f0a635e7b09d18f7db287d61448ae420fb8cb1e6d86b6aa"} Feb 27 16:49:02 crc kubenswrapper[4751]: I0227 16:49:02.002799 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2","Type":"ContainerStarted","Data":"c27cbe383a6f8bb61421a6d0a937ad659d75ba2ff6918a7336852656c87f2389"} Feb 27 16:49:02 crc kubenswrapper[4751]: I0227 16:49:02.006289 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" event={"ID":"e4c57506-90e1-43e2-afff-4038aedef2b4","Type":"ContainerStarted","Data":"da5651dc90032e50d819a0ffd54e4316b3c102c54d49e41a71a4d05113971c33"} Feb 27 16:49:02 crc kubenswrapper[4751]: I0227 16:49:02.006380 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:49:02 crc kubenswrapper[4751]: I0227 16:49:02.008291 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"1f64c674-22f4-48b5-ba58-d602b0c8a213","Type":"ContainerStarted","Data":"c37a1a7fefc6b4fbde99d73fa477165e2985a2fcc00ea69dd04bac88e3c82df1"} Feb 27 16:49:02 crc kubenswrapper[4751]: I0227 16:49:02.008360 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="1f64c674-22f4-48b5-ba58-d602b0c8a213" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://c37a1a7fefc6b4fbde99d73fa477165e2985a2fcc00ea69dd04bac88e3c82df1" gracePeriod=30 Feb 27 16:49:02 crc kubenswrapper[4751]: I0227 16:49:02.015471 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.915737804 podStartE2EDuration="5.015451523s" podCreationTimestamp="2026-02-27 16:48:57 +0000 UTC" firstStartedPulling="2026-02-27 16:48:58.238342152 +0000 UTC m=+1500.385356609" lastFinishedPulling="2026-02-27 16:49:01.338055881 +0000 UTC 
m=+1503.485070328" observedRunningTime="2026-02-27 16:49:02.010199643 +0000 UTC m=+1504.157214090" watchObservedRunningTime="2026-02-27 16:49:02.015451523 +0000 UTC m=+1504.162465970" Feb 27 16:49:02 crc kubenswrapper[4751]: I0227 16:49:02.036568 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" podStartSLOduration=5.036548484 podStartE2EDuration="5.036548484s" podCreationTimestamp="2026-02-27 16:48:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:49:02.030481153 +0000 UTC m=+1504.177495600" watchObservedRunningTime="2026-02-27 16:49:02.036548484 +0000 UTC m=+1504.183562931" Feb 27 16:49:02 crc kubenswrapper[4751]: I0227 16:49:02.060078 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.102072043 podStartE2EDuration="5.0600624s" podCreationTimestamp="2026-02-27 16:48:57 +0000 UTC" firstStartedPulling="2026-02-27 16:48:58.41182175 +0000 UTC m=+1500.558836197" lastFinishedPulling="2026-02-27 16:49:01.369812107 +0000 UTC m=+1503.516826554" observedRunningTime="2026-02-27 16:49:02.055290963 +0000 UTC m=+1504.202305410" watchObservedRunningTime="2026-02-27 16:49:02.0600624 +0000 UTC m=+1504.207076847" Feb 27 16:49:02 crc kubenswrapper[4751]: I0227 16:49:02.079711 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.162002609 podStartE2EDuration="5.079693343s" podCreationTimestamp="2026-02-27 16:48:57 +0000 UTC" firstStartedPulling="2026-02-27 16:48:58.406595981 +0000 UTC m=+1500.553610428" lastFinishedPulling="2026-02-27 16:49:01.324286715 +0000 UTC m=+1503.471301162" observedRunningTime="2026-02-27 16:49:02.069231084 +0000 UTC m=+1504.216245531" watchObservedRunningTime="2026-02-27 16:49:02.079693343 +0000 UTC m=+1504.226707780" Feb 27 16:49:02 crc kubenswrapper[4751]: I0227 16:49:02.430159 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Feb 27 16:49:02 crc kubenswrapper[4751]: I0227 16:49:02.791257 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:03 crc kubenswrapper[4751]: I0227 16:49:03.023065 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a68dc9ec-3c77-43bc-af2d-69401752dad6","Type":"ContainerStarted","Data":"98747ce5f799a5d5b65f04533f75d98601fcf74542456fc4f3598dda1e2c86d7"} Feb 27 16:49:03 crc kubenswrapper[4751]: I0227 16:49:03.023336 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="a68dc9ec-3c77-43bc-af2d-69401752dad6" containerName="nova-metadata-metadata" containerID="cri-o://98747ce5f799a5d5b65f04533f75d98601fcf74542456fc4f3598dda1e2c86d7" gracePeriod=30 Feb 27 16:49:03 crc kubenswrapper[4751]: I0227 16:49:03.023350 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="a68dc9ec-3c77-43bc-af2d-69401752dad6" containerName="nova-metadata-log" containerID="cri-o://0bfb8c164c908a470f0a635e7b09d18f7db287d61448ae420fb8cb1e6d86b6aa" gracePeriod=30 Feb 27 16:49:03 crc kubenswrapper[4751]: I0227 16:49:03.027821 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2","Type":"ContainerStarted","Data":"cb8104614ecfecafedc2ec0ef6e1d7ee8a8d6fae2a025fd82fad75fbcae2631e"} Feb 27 16:49:03 crc kubenswrapper[4751]: I0227 16:49:03.057168 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.158448302 podStartE2EDuration="6.057130631s" podCreationTimestamp="2026-02-27 16:48:57 +0000 UTC" firstStartedPulling="2026-02-27 16:48:58.424139597 +0000 UTC m=+1500.571154044" lastFinishedPulling="2026-02-27 16:49:01.322821926 +0000 UTC m=+1503.469836373" observedRunningTime="2026-02-27 16:49:03.048049749 +0000 UTC m=+1505.195064206" watchObservedRunningTime="2026-02-27 16:49:03.057130631 +0000 UTC m=+1505.204145098" Feb 27 16:49:03 crc kubenswrapper[4751]: I0227 16:49:03.624289 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 27 16:49:03 crc kubenswrapper[4751]: I0227 16:49:03.799627 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a68dc9ec-3c77-43bc-af2d-69401752dad6-logs\") pod \"a68dc9ec-3c77-43bc-af2d-69401752dad6\" (UID: \"a68dc9ec-3c77-43bc-af2d-69401752dad6\") " Feb 27 16:49:03 crc kubenswrapper[4751]: I0227 16:49:03.800681 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a68dc9ec-3c77-43bc-af2d-69401752dad6-combined-ca-bundle\") pod \"a68dc9ec-3c77-43bc-af2d-69401752dad6\" (UID: \"a68dc9ec-3c77-43bc-af2d-69401752dad6\") " Feb 27 16:49:03 crc kubenswrapper[4751]: I0227 16:49:03.800514 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a68dc9ec-3c77-43bc-af2d-69401752dad6-logs" (OuterVolumeSpecName: "logs") pod "a68dc9ec-3c77-43bc-af2d-69401752dad6" (UID: "a68dc9ec-3c77-43bc-af2d-69401752dad6"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:49:03 crc kubenswrapper[4751]: I0227 16:49:03.800717 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a68dc9ec-3c77-43bc-af2d-69401752dad6-config-data\") pod \"a68dc9ec-3c77-43bc-af2d-69401752dad6\" (UID: \"a68dc9ec-3c77-43bc-af2d-69401752dad6\") " Feb 27 16:49:03 crc kubenswrapper[4751]: I0227 16:49:03.800880 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4x4z\" (UniqueName: \"kubernetes.io/projected/a68dc9ec-3c77-43bc-af2d-69401752dad6-kube-api-access-d4x4z\") pod \"a68dc9ec-3c77-43bc-af2d-69401752dad6\" (UID: \"a68dc9ec-3c77-43bc-af2d-69401752dad6\") " Feb 27 16:49:03 crc kubenswrapper[4751]: I0227 16:49:03.802011 4751 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a68dc9ec-3c77-43bc-af2d-69401752dad6-logs\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:03 crc kubenswrapper[4751]: I0227 16:49:03.816896 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a68dc9ec-3c77-43bc-af2d-69401752dad6-kube-api-access-d4x4z" (OuterVolumeSpecName: "kube-api-access-d4x4z") pod "a68dc9ec-3c77-43bc-af2d-69401752dad6" (UID: "a68dc9ec-3c77-43bc-af2d-69401752dad6"). InnerVolumeSpecName "kube-api-access-d4x4z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:49:03 crc kubenswrapper[4751]: I0227 16:49:03.830690 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a68dc9ec-3c77-43bc-af2d-69401752dad6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a68dc9ec-3c77-43bc-af2d-69401752dad6" (UID: "a68dc9ec-3c77-43bc-af2d-69401752dad6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:03 crc kubenswrapper[4751]: I0227 16:49:03.833172 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a68dc9ec-3c77-43bc-af2d-69401752dad6-config-data" (OuterVolumeSpecName: "config-data") pod "a68dc9ec-3c77-43bc-af2d-69401752dad6" (UID: "a68dc9ec-3c77-43bc-af2d-69401752dad6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:03 crc kubenswrapper[4751]: I0227 16:49:03.904428 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a68dc9ec-3c77-43bc-af2d-69401752dad6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:03 crc kubenswrapper[4751]: I0227 16:49:03.904461 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a68dc9ec-3c77-43bc-af2d-69401752dad6-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:03 crc kubenswrapper[4751]: I0227 16:49:03.904472 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4x4z\" (UniqueName: \"kubernetes.io/projected/a68dc9ec-3c77-43bc-af2d-69401752dad6-kube-api-access-d4x4z\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.056703 4751 generic.go:334] "Generic (PLEG): container finished" podID="a68dc9ec-3c77-43bc-af2d-69401752dad6" containerID="98747ce5f799a5d5b65f04533f75d98601fcf74542456fc4f3598dda1e2c86d7" exitCode=0 Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.056748 4751 generic.go:334] "Generic (PLEG): container finished" podID="a68dc9ec-3c77-43bc-af2d-69401752dad6" containerID="0bfb8c164c908a470f0a635e7b09d18f7db287d61448ae420fb8cb1e6d86b6aa" exitCode=143 Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.058057 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.063661 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a68dc9ec-3c77-43bc-af2d-69401752dad6","Type":"ContainerDied","Data":"98747ce5f799a5d5b65f04533f75d98601fcf74542456fc4f3598dda1e2c86d7"} Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.063728 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a68dc9ec-3c77-43bc-af2d-69401752dad6","Type":"ContainerDied","Data":"0bfb8c164c908a470f0a635e7b09d18f7db287d61448ae420fb8cb1e6d86b6aa"} Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.063750 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a68dc9ec-3c77-43bc-af2d-69401752dad6","Type":"ContainerDied","Data":"f3f5bf450a40d9932d15076b95f544b91f225b1fa33b6b53b500778633c55d73"} Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.063778 4751 scope.go:117] "RemoveContainer" containerID="98747ce5f799a5d5b65f04533f75d98601fcf74542456fc4f3598dda1e2c86d7" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.102873 4751 scope.go:117] "RemoveContainer" containerID="0bfb8c164c908a470f0a635e7b09d18f7db287d61448ae420fb8cb1e6d86b6aa" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.109018 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.118483 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.127527 4751 scope.go:117] "RemoveContainer" containerID="98747ce5f799a5d5b65f04533f75d98601fcf74542456fc4f3598dda1e2c86d7" Feb 27 16:49:04 crc kubenswrapper[4751]: E0227 16:49:04.128645 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98747ce5f799a5d5b65f04533f75d98601fcf74542456fc4f3598dda1e2c86d7\": container with ID starting with 98747ce5f799a5d5b65f04533f75d98601fcf74542456fc4f3598dda1e2c86d7 not found: ID does not exist" containerID="98747ce5f799a5d5b65f04533f75d98601fcf74542456fc4f3598dda1e2c86d7" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.128716 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98747ce5f799a5d5b65f04533f75d98601fcf74542456fc4f3598dda1e2c86d7"} err="failed to get container status \"98747ce5f799a5d5b65f04533f75d98601fcf74542456fc4f3598dda1e2c86d7\": rpc error: code = NotFound desc = could not find container \"98747ce5f799a5d5b65f04533f75d98601fcf74542456fc4f3598dda1e2c86d7\": container with ID starting with 98747ce5f799a5d5b65f04533f75d98601fcf74542456fc4f3598dda1e2c86d7 not found: ID does not exist" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.128762 4751 scope.go:117] "RemoveContainer" containerID="0bfb8c164c908a470f0a635e7b09d18f7db287d61448ae420fb8cb1e6d86b6aa" Feb 27 16:49:04 crc kubenswrapper[4751]: E0227 16:49:04.131070 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0bfb8c164c908a470f0a635e7b09d18f7db287d61448ae420fb8cb1e6d86b6aa\": container with ID starting with 0bfb8c164c908a470f0a635e7b09d18f7db287d61448ae420fb8cb1e6d86b6aa not found: ID does not exist" containerID="0bfb8c164c908a470f0a635e7b09d18f7db287d61448ae420fb8cb1e6d86b6aa" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 
16:49:04.131109 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0bfb8c164c908a470f0a635e7b09d18f7db287d61448ae420fb8cb1e6d86b6aa"} err="failed to get container status \"0bfb8c164c908a470f0a635e7b09d18f7db287d61448ae420fb8cb1e6d86b6aa\": rpc error: code = NotFound desc = could not find container \"0bfb8c164c908a470f0a635e7b09d18f7db287d61448ae420fb8cb1e6d86b6aa\": container with ID starting with 0bfb8c164c908a470f0a635e7b09d18f7db287d61448ae420fb8cb1e6d86b6aa not found: ID does not exist" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.131135 4751 scope.go:117] "RemoveContainer" containerID="98747ce5f799a5d5b65f04533f75d98601fcf74542456fc4f3598dda1e2c86d7" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.133474 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98747ce5f799a5d5b65f04533f75d98601fcf74542456fc4f3598dda1e2c86d7"} err="failed to get container status \"98747ce5f799a5d5b65f04533f75d98601fcf74542456fc4f3598dda1e2c86d7\": rpc error: code = NotFound desc = could not find container \"98747ce5f799a5d5b65f04533f75d98601fcf74542456fc4f3598dda1e2c86d7\": container with ID starting with 98747ce5f799a5d5b65f04533f75d98601fcf74542456fc4f3598dda1e2c86d7 not found: ID does not exist" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.133553 4751 scope.go:117] "RemoveContainer" containerID="0bfb8c164c908a470f0a635e7b09d18f7db287d61448ae420fb8cb1e6d86b6aa" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.135524 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0bfb8c164c908a470f0a635e7b09d18f7db287d61448ae420fb8cb1e6d86b6aa"} err="failed to get container status \"0bfb8c164c908a470f0a635e7b09d18f7db287d61448ae420fb8cb1e6d86b6aa\": rpc error: code = NotFound desc = could not find container \"0bfb8c164c908a470f0a635e7b09d18f7db287d61448ae420fb8cb1e6d86b6aa\": container with ID starting with 0bfb8c164c908a470f0a635e7b09d18f7db287d61448ae420fb8cb1e6d86b6aa not found: ID does not exist" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.140156 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:49:04 crc kubenswrapper[4751]: E0227 16:49:04.140702 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a68dc9ec-3c77-43bc-af2d-69401752dad6" containerName="nova-metadata-metadata" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.140734 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="a68dc9ec-3c77-43bc-af2d-69401752dad6" containerName="nova-metadata-metadata" Feb 27 16:49:04 crc kubenswrapper[4751]: E0227 16:49:04.140776 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a68dc9ec-3c77-43bc-af2d-69401752dad6" containerName="nova-metadata-log" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.140789 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="a68dc9ec-3c77-43bc-af2d-69401752dad6" containerName="nova-metadata-log" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.141087 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="a68dc9ec-3c77-43bc-af2d-69401752dad6" containerName="nova-metadata-log" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.141115 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="a68dc9ec-3c77-43bc-af2d-69401752dad6" containerName="nova-metadata-metadata" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.155708 4751 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.159356 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.160808 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.209504 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.313936 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0761fed0-37d3-4775-9a7b-23cde719205c-logs\") pod \"nova-metadata-0\" (UID: \"0761fed0-37d3-4775-9a7b-23cde719205c\") " pod="openstack/nova-metadata-0" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.314080 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0761fed0-37d3-4775-9a7b-23cde719205c-config-data\") pod \"nova-metadata-0\" (UID: \"0761fed0-37d3-4775-9a7b-23cde719205c\") " pod="openstack/nova-metadata-0" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.314110 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0761fed0-37d3-4775-9a7b-23cde719205c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"0761fed0-37d3-4775-9a7b-23cde719205c\") " pod="openstack/nova-metadata-0" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.314135 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0761fed0-37d3-4775-9a7b-23cde719205c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0761fed0-37d3-4775-9a7b-23cde719205c\") " pod="openstack/nova-metadata-0" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.314242 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svdm8\" (UniqueName: \"kubernetes.io/projected/0761fed0-37d3-4775-9a7b-23cde719205c-kube-api-access-svdm8\") pod \"nova-metadata-0\" (UID: \"0761fed0-37d3-4775-9a7b-23cde719205c\") " pod="openstack/nova-metadata-0" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.416129 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svdm8\" (UniqueName: \"kubernetes.io/projected/0761fed0-37d3-4775-9a7b-23cde719205c-kube-api-access-svdm8\") pod \"nova-metadata-0\" (UID: \"0761fed0-37d3-4775-9a7b-23cde719205c\") " pod="openstack/nova-metadata-0" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.416649 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0761fed0-37d3-4775-9a7b-23cde719205c-logs\") pod \"nova-metadata-0\" (UID: \"0761fed0-37d3-4775-9a7b-23cde719205c\") " pod="openstack/nova-metadata-0" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.417041 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0761fed0-37d3-4775-9a7b-23cde719205c-logs\") pod \"nova-metadata-0\" (UID: \"0761fed0-37d3-4775-9a7b-23cde719205c\") " pod="openstack/nova-metadata-0" 
Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.417703 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0761fed0-37d3-4775-9a7b-23cde719205c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"0761fed0-37d3-4775-9a7b-23cde719205c\") " pod="openstack/nova-metadata-0" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.417744 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0761fed0-37d3-4775-9a7b-23cde719205c-config-data\") pod \"nova-metadata-0\" (UID: \"0761fed0-37d3-4775-9a7b-23cde719205c\") " pod="openstack/nova-metadata-0" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.417761 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0761fed0-37d3-4775-9a7b-23cde719205c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0761fed0-37d3-4775-9a7b-23cde719205c\") " pod="openstack/nova-metadata-0" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.422880 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0761fed0-37d3-4775-9a7b-23cde719205c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"0761fed0-37d3-4775-9a7b-23cde719205c\") " pod="openstack/nova-metadata-0" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.423179 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0761fed0-37d3-4775-9a7b-23cde719205c-config-data\") pod \"nova-metadata-0\" (UID: \"0761fed0-37d3-4775-9a7b-23cde719205c\") " pod="openstack/nova-metadata-0" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.425458 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0761fed0-37d3-4775-9a7b-23cde719205c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0761fed0-37d3-4775-9a7b-23cde719205c\") " pod="openstack/nova-metadata-0" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.440983 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svdm8\" (UniqueName: \"kubernetes.io/projected/0761fed0-37d3-4775-9a7b-23cde719205c-kube-api-access-svdm8\") pod \"nova-metadata-0\" (UID: \"0761fed0-37d3-4775-9a7b-23cde719205c\") " pod="openstack/nova-metadata-0" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.507520 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Feb 27 16:49:04 crc kubenswrapper[4751]: I0227 16:49:04.537791 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a68dc9ec-3c77-43bc-af2d-69401752dad6" path="/var/lib/kubelet/pods/a68dc9ec-3c77-43bc-af2d-69401752dad6/volumes" Feb 27 16:49:05 crc kubenswrapper[4751]: W0227 16:49:05.014796 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0761fed0_37d3_4775_9a7b_23cde719205c.slice/crio-7b22518ac2bc7ba2dc5ed76aaf9df4d6e49d2012bebaf919824465005dfea543 WatchSource:0}: Error finding container 7b22518ac2bc7ba2dc5ed76aaf9df4d6e49d2012bebaf919824465005dfea543: Status 404 returned error can't find the container with id 7b22518ac2bc7ba2dc5ed76aaf9df4d6e49d2012bebaf919824465005dfea543 Feb 27 16:49:05 crc kubenswrapper[4751]: I0227 16:49:05.020389 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:49:05 crc kubenswrapper[4751]: I0227 16:49:05.071226 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0761fed0-37d3-4775-9a7b-23cde719205c","Type":"ContainerStarted","Data":"7b22518ac2bc7ba2dc5ed76aaf9df4d6e49d2012bebaf919824465005dfea543"} Feb 27 16:49:06 crc kubenswrapper[4751]: I0227 16:49:06.091996 4751 generic.go:334] "Generic (PLEG): container finished" podID="a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae" containerID="a4d187eda36ea4cc354f922213b9dd6fed260eafa6826d3e2f70a01152740544" exitCode=0 Feb 27 16:49:06 crc kubenswrapper[4751]: I0227 16:49:06.092098 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-tfsfv" event={"ID":"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae","Type":"ContainerDied","Data":"a4d187eda36ea4cc354f922213b9dd6fed260eafa6826d3e2f70a01152740544"} Feb 27 16:49:06 crc kubenswrapper[4751]: I0227 16:49:06.098883 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0761fed0-37d3-4775-9a7b-23cde719205c","Type":"ContainerStarted","Data":"192e26071dcfbeca623ca1fac5fa68c59efff8e8a1ffa7ba9b9b6cc25a077db1"} Feb 27 16:49:06 crc kubenswrapper[4751]: I0227 16:49:06.101595 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0761fed0-37d3-4775-9a7b-23cde719205c","Type":"ContainerStarted","Data":"c321fc79ae326f18f025ae5b831cbe1be9bdc4733f8fca15f7b54fa2201b6966"} Feb 27 16:49:06 crc kubenswrapper[4751]: I0227 16:49:06.159046 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.159023659 podStartE2EDuration="2.159023659s" podCreationTimestamp="2026-02-27 16:49:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:49:06.140949207 +0000 UTC m=+1508.287963664" watchObservedRunningTime="2026-02-27 16:49:06.159023659 +0000 UTC m=+1508.306038116" Feb 27 16:49:07 crc kubenswrapper[4751]: I0227 16:49:07.115265 4751 generic.go:334] "Generic (PLEG): container finished" podID="25f3f7ef-8fae-4ae3-812c-27d2fb474723" containerID="4d65bcc508221ab8e89426f199f38452c9ba3344c8dcef725f5e51974856df60" exitCode=0 Feb 27 16:49:07 crc kubenswrapper[4751]: I0227 16:49:07.115378 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-d6fpf" 
event={"ID":"25f3f7ef-8fae-4ae3-812c-27d2fb474723","Type":"ContainerDied","Data":"4d65bcc508221ab8e89426f199f38452c9ba3344c8dcef725f5e51974856df60"} Feb 27 16:49:07 crc kubenswrapper[4751]: I0227 16:49:07.430048 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Feb 27 16:49:07 crc kubenswrapper[4751]: I0227 16:49:07.473832 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Feb 27 16:49:07 crc kubenswrapper[4751]: I0227 16:49:07.635894 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-tfsfv" Feb 27 16:49:07 crc kubenswrapper[4751]: I0227 16:49:07.725228 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-combined-ca-bundle\") pod \"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae\" (UID: \"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae\") " Feb 27 16:49:07 crc kubenswrapper[4751]: I0227 16:49:07.725579 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-scripts\") pod \"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae\" (UID: \"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae\") " Feb 27 16:49:07 crc kubenswrapper[4751]: I0227 16:49:07.725686 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-config-data\") pod \"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae\" (UID: \"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae\") " Feb 27 16:49:07 crc kubenswrapper[4751]: I0227 16:49:07.725728 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m9zj5\" (UniqueName: \"kubernetes.io/projected/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-kube-api-access-m9zj5\") pod \"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae\" (UID: \"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae\") " Feb 27 16:49:07 crc kubenswrapper[4751]: I0227 16:49:07.731394 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-kube-api-access-m9zj5" (OuterVolumeSpecName: "kube-api-access-m9zj5") pod "a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae" (UID: "a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae"). InnerVolumeSpecName "kube-api-access-m9zj5". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:49:07 crc kubenswrapper[4751]: I0227 16:49:07.731699 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-scripts" (OuterVolumeSpecName: "scripts") pod "a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae" (UID: "a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:07 crc kubenswrapper[4751]: I0227 16:49:07.761105 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae" (UID: "a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:07 crc kubenswrapper[4751]: I0227 16:49:07.770389 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 27 16:49:07 crc kubenswrapper[4751]: I0227 16:49:07.770451 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 27 16:49:07 crc kubenswrapper[4751]: I0227 16:49:07.777908 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-config-data" (OuterVolumeSpecName: "config-data") pod "a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae" (UID: "a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:07 crc kubenswrapper[4751]: I0227 16:49:07.828654 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:07 crc kubenswrapper[4751]: I0227 16:49:07.828736 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:07 crc kubenswrapper[4751]: I0227 16:49:07.828786 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:07 crc kubenswrapper[4751]: I0227 16:49:07.828810 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m9zj5\" (UniqueName: \"kubernetes.io/projected/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae-kube-api-access-m9zj5\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:07 crc kubenswrapper[4751]: I0227 16:49:07.996684 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.077803 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-d8ldz"] Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.078081 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" podUID="b4cff4a9-9681-46d5-8c65-03812065b51e" containerName="dnsmasq-dns" containerID="cri-o://bec89c722ac587944d27ef87b766fd75fd312c86a28642bcd3e02f14f977d04d" gracePeriod=10 Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.132643 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.143592 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-tfsfv" Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.144501 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-tfsfv" event={"ID":"a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae","Type":"ContainerDied","Data":"1e05459afa03625e8d7eefd117f6947210235c7970492bfcf64bd9f203bd2f58"} Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.144551 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1e05459afa03625e8d7eefd117f6947210235c7970492bfcf64bd9f203bd2f58" Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.211666 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.359691 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.359900 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2" containerName="nova-api-log" containerID="cri-o://c27cbe383a6f8bb61421a6d0a937ad659d75ba2ff6918a7336852656c87f2389" gracePeriod=30 Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.360021 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2" containerName="nova-api-api" containerID="cri-o://cb8104614ecfecafedc2ec0ef6e1d7ee8a8d6fae2a025fd82fad75fbcae2631e" gracePeriod=30 Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.369186 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.194:8774/\": EOF" Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.369186 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.194:8774/\": EOF" Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.378941 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.379157 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="0761fed0-37d3-4775-9a7b-23cde719205c" containerName="nova-metadata-log" containerID="cri-o://c321fc79ae326f18f025ae5b831cbe1be9bdc4733f8fca15f7b54fa2201b6966" gracePeriod=30 Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.379291 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="0761fed0-37d3-4775-9a7b-23cde719205c" containerName="nova-metadata-metadata" containerID="cri-o://192e26071dcfbeca623ca1fac5fa68c59efff8e8a1ffa7ba9b9b6cc25a077db1" gracePeriod=30 Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.792001 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.794070 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-d6fpf" Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.816518 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.855319 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25f3f7ef-8fae-4ae3-812c-27d2fb474723-config-data\") pod \"25f3f7ef-8fae-4ae3-812c-27d2fb474723\" (UID: \"25f3f7ef-8fae-4ae3-812c-27d2fb474723\") " Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.855419 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-ovsdbserver-sb\") pod \"b4cff4a9-9681-46d5-8c65-03812065b51e\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.855522 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-config\") pod \"b4cff4a9-9681-46d5-8c65-03812065b51e\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.855543 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-dns-swift-storage-0\") pod \"b4cff4a9-9681-46d5-8c65-03812065b51e\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.855569 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-dns-svc\") pod \"b4cff4a9-9681-46d5-8c65-03812065b51e\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.855605 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25f3f7ef-8fae-4ae3-812c-27d2fb474723-combined-ca-bundle\") pod \"25f3f7ef-8fae-4ae3-812c-27d2fb474723\" (UID: \"25f3f7ef-8fae-4ae3-812c-27d2fb474723\") " Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.855646 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/25f3f7ef-8fae-4ae3-812c-27d2fb474723-scripts\") pod \"25f3f7ef-8fae-4ae3-812c-27d2fb474723\" (UID: \"25f3f7ef-8fae-4ae3-812c-27d2fb474723\") " Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.855722 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ct2mf\" (UniqueName: \"kubernetes.io/projected/b4cff4a9-9681-46d5-8c65-03812065b51e-kube-api-access-ct2mf\") pod \"b4cff4a9-9681-46d5-8c65-03812065b51e\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.855740 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-ovsdbserver-nb\") pod \"b4cff4a9-9681-46d5-8c65-03812065b51e\" (UID: \"b4cff4a9-9681-46d5-8c65-03812065b51e\") " Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.855776 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bcmbb\" (UniqueName: \"kubernetes.io/projected/25f3f7ef-8fae-4ae3-812c-27d2fb474723-kube-api-access-bcmbb\") pod \"25f3f7ef-8fae-4ae3-812c-27d2fb474723\" (UID: 
\"25f3f7ef-8fae-4ae3-812c-27d2fb474723\") " Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.864636 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4cff4a9-9681-46d5-8c65-03812065b51e-kube-api-access-ct2mf" (OuterVolumeSpecName: "kube-api-access-ct2mf") pod "b4cff4a9-9681-46d5-8c65-03812065b51e" (UID: "b4cff4a9-9681-46d5-8c65-03812065b51e"). InnerVolumeSpecName "kube-api-access-ct2mf". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.865561 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25f3f7ef-8fae-4ae3-812c-27d2fb474723-scripts" (OuterVolumeSpecName: "scripts") pod "25f3f7ef-8fae-4ae3-812c-27d2fb474723" (UID: "25f3f7ef-8fae-4ae3-812c-27d2fb474723"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.889728 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25f3f7ef-8fae-4ae3-812c-27d2fb474723-kube-api-access-bcmbb" (OuterVolumeSpecName: "kube-api-access-bcmbb") pod "25f3f7ef-8fae-4ae3-812c-27d2fb474723" (UID: "25f3f7ef-8fae-4ae3-812c-27d2fb474723"). InnerVolumeSpecName "kube-api-access-bcmbb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.890258 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25f3f7ef-8fae-4ae3-812c-27d2fb474723-config-data" (OuterVolumeSpecName: "config-data") pod "25f3f7ef-8fae-4ae3-812c-27d2fb474723" (UID: "25f3f7ef-8fae-4ae3-812c-27d2fb474723"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.914185 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b4cff4a9-9681-46d5-8c65-03812065b51e" (UID: "b4cff4a9-9681-46d5-8c65-03812065b51e"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.961197 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/25f3f7ef-8fae-4ae3-812c-27d2fb474723-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.961236 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ct2mf\" (UniqueName: \"kubernetes.io/projected/b4cff4a9-9681-46d5-8c65-03812065b51e-kube-api-access-ct2mf\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.961246 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bcmbb\" (UniqueName: \"kubernetes.io/projected/25f3f7ef-8fae-4ae3-812c-27d2fb474723-kube-api-access-bcmbb\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.961255 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25f3f7ef-8fae-4ae3-812c-27d2fb474723-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.961264 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.962783 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b4cff4a9-9681-46d5-8c65-03812065b51e" (UID: "b4cff4a9-9681-46d5-8c65-03812065b51e"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.977019 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b4cff4a9-9681-46d5-8c65-03812065b51e" (UID: "b4cff4a9-9681-46d5-8c65-03812065b51e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.981579 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25f3f7ef-8fae-4ae3-812c-27d2fb474723-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "25f3f7ef-8fae-4ae3-812c-27d2fb474723" (UID: "25f3f7ef-8fae-4ae3-812c-27d2fb474723"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.988990 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b4cff4a9-9681-46d5-8c65-03812065b51e" (UID: "b4cff4a9-9681-46d5-8c65-03812065b51e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:49:08 crc kubenswrapper[4751]: I0227 16:49:08.990925 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-config" (OuterVolumeSpecName: "config") pod "b4cff4a9-9681-46d5-8c65-03812065b51e" (UID: "b4cff4a9-9681-46d5-8c65-03812065b51e"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.031284 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.062254 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-svdm8\" (UniqueName: \"kubernetes.io/projected/0761fed0-37d3-4775-9a7b-23cde719205c-kube-api-access-svdm8\") pod \"0761fed0-37d3-4775-9a7b-23cde719205c\" (UID: \"0761fed0-37d3-4775-9a7b-23cde719205c\") " Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.062321 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0761fed0-37d3-4775-9a7b-23cde719205c-nova-metadata-tls-certs\") pod \"0761fed0-37d3-4775-9a7b-23cde719205c\" (UID: \"0761fed0-37d3-4775-9a7b-23cde719205c\") " Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.062418 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0761fed0-37d3-4775-9a7b-23cde719205c-logs\") pod \"0761fed0-37d3-4775-9a7b-23cde719205c\" (UID: \"0761fed0-37d3-4775-9a7b-23cde719205c\") " Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.062590 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0761fed0-37d3-4775-9a7b-23cde719205c-config-data\") pod \"0761fed0-37d3-4775-9a7b-23cde719205c\" (UID: \"0761fed0-37d3-4775-9a7b-23cde719205c\") " Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.062643 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0761fed0-37d3-4775-9a7b-23cde719205c-combined-ca-bundle\") pod \"0761fed0-37d3-4775-9a7b-23cde719205c\" (UID: \"0761fed0-37d3-4775-9a7b-23cde719205c\") " Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.063066 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0761fed0-37d3-4775-9a7b-23cde719205c-logs" (OuterVolumeSpecName: "logs") pod "0761fed0-37d3-4775-9a7b-23cde719205c" (UID: "0761fed0-37d3-4775-9a7b-23cde719205c"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.063135 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.063155 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.063171 4751 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.063185 4751 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4cff4a9-9681-46d5-8c65-03812065b51e-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.063197 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25f3f7ef-8fae-4ae3-812c-27d2fb474723-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.066577 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0761fed0-37d3-4775-9a7b-23cde719205c-kube-api-access-svdm8" (OuterVolumeSpecName: "kube-api-access-svdm8") pod "0761fed0-37d3-4775-9a7b-23cde719205c" (UID: "0761fed0-37d3-4775-9a7b-23cde719205c"). InnerVolumeSpecName "kube-api-access-svdm8". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.093526 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0761fed0-37d3-4775-9a7b-23cde719205c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0761fed0-37d3-4775-9a7b-23cde719205c" (UID: "0761fed0-37d3-4775-9a7b-23cde719205c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.105053 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0761fed0-37d3-4775-9a7b-23cde719205c-config-data" (OuterVolumeSpecName: "config-data") pod "0761fed0-37d3-4775-9a7b-23cde719205c" (UID: "0761fed0-37d3-4775-9a7b-23cde719205c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.120084 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0761fed0-37d3-4775-9a7b-23cde719205c-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "0761fed0-37d3-4775-9a7b-23cde719205c" (UID: "0761fed0-37d3-4775-9a7b-23cde719205c"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.152779 4751 generic.go:334] "Generic (PLEG): container finished" podID="b4cff4a9-9681-46d5-8c65-03812065b51e" containerID="bec89c722ac587944d27ef87b766fd75fd312c86a28642bcd3e02f14f977d04d" exitCode=0 Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.153332 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" event={"ID":"b4cff4a9-9681-46d5-8c65-03812065b51e","Type":"ContainerDied","Data":"bec89c722ac587944d27ef87b766fd75fd312c86a28642bcd3e02f14f977d04d"} Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.153538 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" event={"ID":"b4cff4a9-9681-46d5-8c65-03812065b51e","Type":"ContainerDied","Data":"add9564ac1a48b896f39b2483f929486e5cf36622f460d98d00fe097ecea542b"} Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.153621 4751 scope.go:117] "RemoveContainer" containerID="bec89c722ac587944d27ef87b766fd75fd312c86a28642bcd3e02f14f977d04d" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.153795 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-d8ldz" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.164658 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-d6fpf" event={"ID":"25f3f7ef-8fae-4ae3-812c-27d2fb474723","Type":"ContainerDied","Data":"8207ee33bf9e525ede03a6c973b18c4ce99d7f9f324f138dcf41e0703e0ff887"} Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.164701 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8207ee33bf9e525ede03a6c973b18c4ce99d7f9f324f138dcf41e0703e0ff887" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.164790 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-d6fpf" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.168090 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0761fed0-37d3-4775-9a7b-23cde719205c-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.168221 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0761fed0-37d3-4775-9a7b-23cde719205c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.168323 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-svdm8\" (UniqueName: \"kubernetes.io/projected/0761fed0-37d3-4775-9a7b-23cde719205c-kube-api-access-svdm8\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.168411 4751 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0761fed0-37d3-4775-9a7b-23cde719205c-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.168486 4751 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0761fed0-37d3-4775-9a7b-23cde719205c-logs\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.172602 4751 generic.go:334] "Generic (PLEG): container finished" podID="0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2" containerID="c27cbe383a6f8bb61421a6d0a937ad659d75ba2ff6918a7336852656c87f2389" exitCode=143 Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.172772 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2","Type":"ContainerDied","Data":"c27cbe383a6f8bb61421a6d0a937ad659d75ba2ff6918a7336852656c87f2389"} Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.175154 4751 generic.go:334] "Generic (PLEG): container finished" podID="0761fed0-37d3-4775-9a7b-23cde719205c" containerID="192e26071dcfbeca623ca1fac5fa68c59efff8e8a1ffa7ba9b9b6cc25a077db1" exitCode=0 Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.175197 4751 generic.go:334] "Generic (PLEG): container finished" podID="0761fed0-37d3-4775-9a7b-23cde719205c" containerID="c321fc79ae326f18f025ae5b831cbe1be9bdc4733f8fca15f7b54fa2201b6966" exitCode=143 Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.175917 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0761fed0-37d3-4775-9a7b-23cde719205c","Type":"ContainerDied","Data":"192e26071dcfbeca623ca1fac5fa68c59efff8e8a1ffa7ba9b9b6cc25a077db1"} Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.175942 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.175960 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0761fed0-37d3-4775-9a7b-23cde719205c","Type":"ContainerDied","Data":"c321fc79ae326f18f025ae5b831cbe1be9bdc4733f8fca15f7b54fa2201b6966"} Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.175974 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0761fed0-37d3-4775-9a7b-23cde719205c","Type":"ContainerDied","Data":"7b22518ac2bc7ba2dc5ed76aaf9df4d6e49d2012bebaf919824465005dfea543"} Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.204092 4751 scope.go:117] "RemoveContainer" containerID="556cf53f4179fcf44c1d423e90d620c00346bb7cc74ba954c7e5c1c90d2510fa" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.217555 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-d8ldz"] Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.245368 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-d8ldz"] Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.252783 4751 scope.go:117] "RemoveContainer" containerID="bec89c722ac587944d27ef87b766fd75fd312c86a28642bcd3e02f14f977d04d" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.254437 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Feb 27 16:49:09 crc kubenswrapper[4751]: E0227 16:49:09.254855 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4cff4a9-9681-46d5-8c65-03812065b51e" containerName="dnsmasq-dns" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.254869 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4cff4a9-9681-46d5-8c65-03812065b51e" containerName="dnsmasq-dns" Feb 27 16:49:09 crc kubenswrapper[4751]: E0227 16:49:09.254883 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae" containerName="nova-manage" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.254889 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae" containerName="nova-manage" Feb 27 16:49:09 crc kubenswrapper[4751]: E0227 16:49:09.254902 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0761fed0-37d3-4775-9a7b-23cde719205c" containerName="nova-metadata-log" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.254907 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="0761fed0-37d3-4775-9a7b-23cde719205c" containerName="nova-metadata-log" Feb 27 16:49:09 crc kubenswrapper[4751]: E0227 16:49:09.254918 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25f3f7ef-8fae-4ae3-812c-27d2fb474723" containerName="nova-cell1-conductor-db-sync" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.254924 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="25f3f7ef-8fae-4ae3-812c-27d2fb474723" containerName="nova-cell1-conductor-db-sync" Feb 27 16:49:09 crc kubenswrapper[4751]: E0227 16:49:09.254935 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4cff4a9-9681-46d5-8c65-03812065b51e" containerName="init" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.254942 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4cff4a9-9681-46d5-8c65-03812065b51e" containerName="init" Feb 27 16:49:09 crc kubenswrapper[4751]: E0227 16:49:09.254965 4751 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0761fed0-37d3-4775-9a7b-23cde719205c" containerName="nova-metadata-metadata" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.254970 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="0761fed0-37d3-4775-9a7b-23cde719205c" containerName="nova-metadata-metadata" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.255137 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4cff4a9-9681-46d5-8c65-03812065b51e" containerName="dnsmasq-dns" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.255149 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="0761fed0-37d3-4775-9a7b-23cde719205c" containerName="nova-metadata-log" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.255161 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="0761fed0-37d3-4775-9a7b-23cde719205c" containerName="nova-metadata-metadata" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.255183 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae" containerName="nova-manage" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.255192 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="25f3f7ef-8fae-4ae3-812c-27d2fb474723" containerName="nova-cell1-conductor-db-sync" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.255831 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.258058 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Feb 27 16:49:09 crc kubenswrapper[4751]: E0227 16:49:09.258080 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bec89c722ac587944d27ef87b766fd75fd312c86a28642bcd3e02f14f977d04d\": container with ID starting with bec89c722ac587944d27ef87b766fd75fd312c86a28642bcd3e02f14f977d04d not found: ID does not exist" containerID="bec89c722ac587944d27ef87b766fd75fd312c86a28642bcd3e02f14f977d04d" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.259229 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bec89c722ac587944d27ef87b766fd75fd312c86a28642bcd3e02f14f977d04d"} err="failed to get container status \"bec89c722ac587944d27ef87b766fd75fd312c86a28642bcd3e02f14f977d04d\": rpc error: code = NotFound desc = could not find container \"bec89c722ac587944d27ef87b766fd75fd312c86a28642bcd3e02f14f977d04d\": container with ID starting with bec89c722ac587944d27ef87b766fd75fd312c86a28642bcd3e02f14f977d04d not found: ID does not exist" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.259264 4751 scope.go:117] "RemoveContainer" containerID="556cf53f4179fcf44c1d423e90d620c00346bb7cc74ba954c7e5c1c90d2510fa" Feb 27 16:49:09 crc kubenswrapper[4751]: E0227 16:49:09.261044 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"556cf53f4179fcf44c1d423e90d620c00346bb7cc74ba954c7e5c1c90d2510fa\": container with ID starting with 556cf53f4179fcf44c1d423e90d620c00346bb7cc74ba954c7e5c1c90d2510fa not found: ID does not exist" containerID="556cf53f4179fcf44c1d423e90d620c00346bb7cc74ba954c7e5c1c90d2510fa" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.261104 4751 pod_container_deletor.go:53] "DeleteContainer returned 
error" containerID={"Type":"cri-o","ID":"556cf53f4179fcf44c1d423e90d620c00346bb7cc74ba954c7e5c1c90d2510fa"} err="failed to get container status \"556cf53f4179fcf44c1d423e90d620c00346bb7cc74ba954c7e5c1c90d2510fa\": rpc error: code = NotFound desc = could not find container \"556cf53f4179fcf44c1d423e90d620c00346bb7cc74ba954c7e5c1c90d2510fa\": container with ID starting with 556cf53f4179fcf44c1d423e90d620c00346bb7cc74ba954c7e5c1c90d2510fa not found: ID does not exist" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.261140 4751 scope.go:117] "RemoveContainer" containerID="192e26071dcfbeca623ca1fac5fa68c59efff8e8a1ffa7ba9b9b6cc25a077db1" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.274101 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.296412 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.308511 4751 scope.go:117] "RemoveContainer" containerID="c321fc79ae326f18f025ae5b831cbe1be9bdc4733f8fca15f7b54fa2201b6966" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.308546 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.332413 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.334725 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.338281 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.338502 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.347103 4751 scope.go:117] "RemoveContainer" containerID="192e26071dcfbeca623ca1fac5fa68c59efff8e8a1ffa7ba9b9b6cc25a077db1" Feb 27 16:49:09 crc kubenswrapper[4751]: E0227 16:49:09.349101 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"192e26071dcfbeca623ca1fac5fa68c59efff8e8a1ffa7ba9b9b6cc25a077db1\": container with ID starting with 192e26071dcfbeca623ca1fac5fa68c59efff8e8a1ffa7ba9b9b6cc25a077db1 not found: ID does not exist" containerID="192e26071dcfbeca623ca1fac5fa68c59efff8e8a1ffa7ba9b9b6cc25a077db1" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.349144 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"192e26071dcfbeca623ca1fac5fa68c59efff8e8a1ffa7ba9b9b6cc25a077db1"} err="failed to get container status \"192e26071dcfbeca623ca1fac5fa68c59efff8e8a1ffa7ba9b9b6cc25a077db1\": rpc error: code = NotFound desc = could not find container \"192e26071dcfbeca623ca1fac5fa68c59efff8e8a1ffa7ba9b9b6cc25a077db1\": container with ID starting with 192e26071dcfbeca623ca1fac5fa68c59efff8e8a1ffa7ba9b9b6cc25a077db1 not found: ID does not exist" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.349171 4751 scope.go:117] "RemoveContainer" containerID="c321fc79ae326f18f025ae5b831cbe1be9bdc4733f8fca15f7b54fa2201b6966" Feb 27 16:49:09 crc kubenswrapper[4751]: E0227 16:49:09.349776 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find 
container \"c321fc79ae326f18f025ae5b831cbe1be9bdc4733f8fca15f7b54fa2201b6966\": container with ID starting with c321fc79ae326f18f025ae5b831cbe1be9bdc4733f8fca15f7b54fa2201b6966 not found: ID does not exist" containerID="c321fc79ae326f18f025ae5b831cbe1be9bdc4733f8fca15f7b54fa2201b6966" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.349799 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c321fc79ae326f18f025ae5b831cbe1be9bdc4733f8fca15f7b54fa2201b6966"} err="failed to get container status \"c321fc79ae326f18f025ae5b831cbe1be9bdc4733f8fca15f7b54fa2201b6966\": rpc error: code = NotFound desc = could not find container \"c321fc79ae326f18f025ae5b831cbe1be9bdc4733f8fca15f7b54fa2201b6966\": container with ID starting with c321fc79ae326f18f025ae5b831cbe1be9bdc4733f8fca15f7b54fa2201b6966 not found: ID does not exist" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.349813 4751 scope.go:117] "RemoveContainer" containerID="192e26071dcfbeca623ca1fac5fa68c59efff8e8a1ffa7ba9b9b6cc25a077db1" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.353973 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"192e26071dcfbeca623ca1fac5fa68c59efff8e8a1ffa7ba9b9b6cc25a077db1"} err="failed to get container status \"192e26071dcfbeca623ca1fac5fa68c59efff8e8a1ffa7ba9b9b6cc25a077db1\": rpc error: code = NotFound desc = could not find container \"192e26071dcfbeca623ca1fac5fa68c59efff8e8a1ffa7ba9b9b6cc25a077db1\": container with ID starting with 192e26071dcfbeca623ca1fac5fa68c59efff8e8a1ffa7ba9b9b6cc25a077db1 not found: ID does not exist" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.354019 4751 scope.go:117] "RemoveContainer" containerID="c321fc79ae326f18f025ae5b831cbe1be9bdc4733f8fca15f7b54fa2201b6966" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.354868 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c321fc79ae326f18f025ae5b831cbe1be9bdc4733f8fca15f7b54fa2201b6966"} err="failed to get container status \"c321fc79ae326f18f025ae5b831cbe1be9bdc4733f8fca15f7b54fa2201b6966\": rpc error: code = NotFound desc = could not find container \"c321fc79ae326f18f025ae5b831cbe1be9bdc4733f8fca15f7b54fa2201b6966\": container with ID starting with c321fc79ae326f18f025ae5b831cbe1be9bdc4733f8fca15f7b54fa2201b6966 not found: ID does not exist" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.362732 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.386027 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cbwjf\" (UniqueName: \"kubernetes.io/projected/3634d5e3-a464-4b1b-91ef-bbe63f530d48-kube-api-access-cbwjf\") pod \"nova-metadata-0\" (UID: \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\") " pod="openstack/nova-metadata-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.386076 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6zh4\" (UniqueName: \"kubernetes.io/projected/2d8d9ed0-8606-47cb-a164-7e6bbac390cd-kube-api-access-f6zh4\") pod \"nova-cell1-conductor-0\" (UID: \"2d8d9ed0-8606-47cb-a164-7e6bbac390cd\") " pod="openstack/nova-cell1-conductor-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.386096 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/2d8d9ed0-8606-47cb-a164-7e6bbac390cd-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"2d8d9ed0-8606-47cb-a164-7e6bbac390cd\") " pod="openstack/nova-cell1-conductor-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.386125 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3634d5e3-a464-4b1b-91ef-bbe63f530d48-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\") " pod="openstack/nova-metadata-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.386161 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d8d9ed0-8606-47cb-a164-7e6bbac390cd-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"2d8d9ed0-8606-47cb-a164-7e6bbac390cd\") " pod="openstack/nova-cell1-conductor-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.386226 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3634d5e3-a464-4b1b-91ef-bbe63f530d48-config-data\") pod \"nova-metadata-0\" (UID: \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\") " pod="openstack/nova-metadata-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.386244 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/3634d5e3-a464-4b1b-91ef-bbe63f530d48-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\") " pod="openstack/nova-metadata-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.386276 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3634d5e3-a464-4b1b-91ef-bbe63f530d48-logs\") pod \"nova-metadata-0\" (UID: \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\") " pod="openstack/nova-metadata-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.489973 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3634d5e3-a464-4b1b-91ef-bbe63f530d48-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\") " pod="openstack/nova-metadata-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.490048 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d8d9ed0-8606-47cb-a164-7e6bbac390cd-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"2d8d9ed0-8606-47cb-a164-7e6bbac390cd\") " pod="openstack/nova-cell1-conductor-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.490138 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3634d5e3-a464-4b1b-91ef-bbe63f530d48-config-data\") pod \"nova-metadata-0\" (UID: \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\") " pod="openstack/nova-metadata-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.490160 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/3634d5e3-a464-4b1b-91ef-bbe63f530d48-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: 
\"3634d5e3-a464-4b1b-91ef-bbe63f530d48\") " pod="openstack/nova-metadata-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.490192 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3634d5e3-a464-4b1b-91ef-bbe63f530d48-logs\") pod \"nova-metadata-0\" (UID: \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\") " pod="openstack/nova-metadata-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.490667 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3634d5e3-a464-4b1b-91ef-bbe63f530d48-logs\") pod \"nova-metadata-0\" (UID: \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\") " pod="openstack/nova-metadata-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.490717 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cbwjf\" (UniqueName: \"kubernetes.io/projected/3634d5e3-a464-4b1b-91ef-bbe63f530d48-kube-api-access-cbwjf\") pod \"nova-metadata-0\" (UID: \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\") " pod="openstack/nova-metadata-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.490738 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6zh4\" (UniqueName: \"kubernetes.io/projected/2d8d9ed0-8606-47cb-a164-7e6bbac390cd-kube-api-access-f6zh4\") pod \"nova-cell1-conductor-0\" (UID: \"2d8d9ed0-8606-47cb-a164-7e6bbac390cd\") " pod="openstack/nova-cell1-conductor-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.491061 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d8d9ed0-8606-47cb-a164-7e6bbac390cd-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"2d8d9ed0-8606-47cb-a164-7e6bbac390cd\") " pod="openstack/nova-cell1-conductor-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.494135 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d8d9ed0-8606-47cb-a164-7e6bbac390cd-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"2d8d9ed0-8606-47cb-a164-7e6bbac390cd\") " pod="openstack/nova-cell1-conductor-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.494389 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3634d5e3-a464-4b1b-91ef-bbe63f530d48-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\") " pod="openstack/nova-metadata-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.495904 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3634d5e3-a464-4b1b-91ef-bbe63f530d48-config-data\") pod \"nova-metadata-0\" (UID: \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\") " pod="openstack/nova-metadata-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.497782 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d8d9ed0-8606-47cb-a164-7e6bbac390cd-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"2d8d9ed0-8606-47cb-a164-7e6bbac390cd\") " pod="openstack/nova-cell1-conductor-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.509769 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/3634d5e3-a464-4b1b-91ef-bbe63f530d48-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\") " pod="openstack/nova-metadata-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.516023 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cbwjf\" (UniqueName: \"kubernetes.io/projected/3634d5e3-a464-4b1b-91ef-bbe63f530d48-kube-api-access-cbwjf\") pod \"nova-metadata-0\" (UID: \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\") " pod="openstack/nova-metadata-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.516516 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6zh4\" (UniqueName: \"kubernetes.io/projected/2d8d9ed0-8606-47cb-a164-7e6bbac390cd-kube-api-access-f6zh4\") pod \"nova-cell1-conductor-0\" (UID: \"2d8d9ed0-8606-47cb-a164-7e6bbac390cd\") " pod="openstack/nova-cell1-conductor-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.595889 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Feb 27 16:49:09 crc kubenswrapper[4751]: I0227 16:49:09.663341 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 27 16:49:10 crc kubenswrapper[4751]: W0227 16:49:10.091871 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2d8d9ed0_8606_47cb_a164_7e6bbac390cd.slice/crio-acf26164004e25883c932d9f0da8e1b26f131f371308f2367b0539bbdddade95 WatchSource:0}: Error finding container acf26164004e25883c932d9f0da8e1b26f131f371308f2367b0539bbdddade95: Status 404 returned error can't find the container with id acf26164004e25883c932d9f0da8e1b26f131f371308f2367b0539bbdddade95 Feb 27 16:49:10 crc kubenswrapper[4751]: I0227 16:49:10.102828 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Feb 27 16:49:10 crc kubenswrapper[4751]: W0227 16:49:10.189827 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3634d5e3_a464_4b1b_91ef_bbe63f530d48.slice/crio-ca2597601876ca7cf927f6824ac88939bca51a6155c6394ea89355718643f302 WatchSource:0}: Error finding container ca2597601876ca7cf927f6824ac88939bca51a6155c6394ea89355718643f302: Status 404 returned error can't find the container with id ca2597601876ca7cf927f6824ac88939bca51a6155c6394ea89355718643f302 Feb 27 16:49:10 crc kubenswrapper[4751]: I0227 16:49:10.191935 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"2d8d9ed0-8606-47cb-a164-7e6bbac390cd","Type":"ContainerStarted","Data":"acf26164004e25883c932d9f0da8e1b26f131f371308f2367b0539bbdddade95"} Feb 27 16:49:10 crc kubenswrapper[4751]: I0227 16:49:10.192052 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="3bdb66cf-24f8-46e3-baef-dc1ef718e027" containerName="nova-scheduler-scheduler" containerID="cri-o://03351db41ca8520163d1679db83c712b3cbcb947ebc93b3da6924d9970f1a30b" gracePeriod=30 Feb 27 16:49:10 crc kubenswrapper[4751]: I0227 16:49:10.202398 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:49:10 crc kubenswrapper[4751]: I0227 16:49:10.531764 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0761fed0-37d3-4775-9a7b-23cde719205c" 
path="/var/lib/kubelet/pods/0761fed0-37d3-4775-9a7b-23cde719205c/volumes" Feb 27 16:49:10 crc kubenswrapper[4751]: I0227 16:49:10.532825 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4cff4a9-9681-46d5-8c65-03812065b51e" path="/var/lib/kubelet/pods/b4cff4a9-9681-46d5-8c65-03812065b51e/volumes" Feb 27 16:49:11 crc kubenswrapper[4751]: I0227 16:49:11.204729 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3634d5e3-a464-4b1b-91ef-bbe63f530d48","Type":"ContainerStarted","Data":"57e845be3c5f09f94c9bf0015590d42555fe5d4051a56aa33f23bb8c7e32c4c4"} Feb 27 16:49:11 crc kubenswrapper[4751]: I0227 16:49:11.204779 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3634d5e3-a464-4b1b-91ef-bbe63f530d48","Type":"ContainerStarted","Data":"6a4f2712c616d4410a3a9cead957bdcd07c42628edcfe9b34a4767d6b46ec9f8"} Feb 27 16:49:11 crc kubenswrapper[4751]: I0227 16:49:11.204793 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3634d5e3-a464-4b1b-91ef-bbe63f530d48","Type":"ContainerStarted","Data":"ca2597601876ca7cf927f6824ac88939bca51a6155c6394ea89355718643f302"} Feb 27 16:49:11 crc kubenswrapper[4751]: I0227 16:49:11.207166 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"2d8d9ed0-8606-47cb-a164-7e6bbac390cd","Type":"ContainerStarted","Data":"2447590027e3c3985b0ff486fd0b56c2badc4c6132000ab0f491f2b1f773ddc6"} Feb 27 16:49:11 crc kubenswrapper[4751]: I0227 16:49:11.207378 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Feb 27 16:49:11 crc kubenswrapper[4751]: I0227 16:49:11.223254 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.22322076 podStartE2EDuration="2.22322076s" podCreationTimestamp="2026-02-27 16:49:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:49:11.222002468 +0000 UTC m=+1513.369016915" watchObservedRunningTime="2026-02-27 16:49:11.22322076 +0000 UTC m=+1513.370235257" Feb 27 16:49:11 crc kubenswrapper[4751]: I0227 16:49:11.253298 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.253277349 podStartE2EDuration="2.253277349s" podCreationTimestamp="2026-02-27 16:49:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:49:11.246442047 +0000 UTC m=+1513.393456514" watchObservedRunningTime="2026-02-27 16:49:11.253277349 +0000 UTC m=+1513.400291796" Feb 27 16:49:12 crc kubenswrapper[4751]: I0227 16:49:12.201886 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 27 16:49:12 crc kubenswrapper[4751]: I0227 16:49:12.202420 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="ad38c130-da58-4681-ac04-c017147fcc6e" containerName="kube-state-metrics" containerID="cri-o://1112bf30f48aa7869fb6fecec4782ea6f38b6fa39e3073193c9725ad3ad85923" gracePeriod=30 Feb 27 16:49:12 crc kubenswrapper[4751]: E0227 16:49:12.435825 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: 
container is stopping, stdout: , stderr: , exit code -1" containerID="03351db41ca8520163d1679db83c712b3cbcb947ebc93b3da6924d9970f1a30b" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Feb 27 16:49:12 crc kubenswrapper[4751]: E0227 16:49:12.437676 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="03351db41ca8520163d1679db83c712b3cbcb947ebc93b3da6924d9970f1a30b" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Feb 27 16:49:12 crc kubenswrapper[4751]: E0227 16:49:12.439012 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="03351db41ca8520163d1679db83c712b3cbcb947ebc93b3da6924d9970f1a30b" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Feb 27 16:49:12 crc kubenswrapper[4751]: E0227 16:49:12.439042 4751 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="3bdb66cf-24f8-46e3-baef-dc1ef718e027" containerName="nova-scheduler-scheduler" Feb 27 16:49:12 crc kubenswrapper[4751]: I0227 16:49:12.677102 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 27 16:49:12 crc kubenswrapper[4751]: I0227 16:49:12.764239 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cqdrk\" (UniqueName: \"kubernetes.io/projected/ad38c130-da58-4681-ac04-c017147fcc6e-kube-api-access-cqdrk\") pod \"ad38c130-da58-4681-ac04-c017147fcc6e\" (UID: \"ad38c130-da58-4681-ac04-c017147fcc6e\") " Feb 27 16:49:12 crc kubenswrapper[4751]: I0227 16:49:12.769735 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad38c130-da58-4681-ac04-c017147fcc6e-kube-api-access-cqdrk" (OuterVolumeSpecName: "kube-api-access-cqdrk") pod "ad38c130-da58-4681-ac04-c017147fcc6e" (UID: "ad38c130-da58-4681-ac04-c017147fcc6e"). InnerVolumeSpecName "kube-api-access-cqdrk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:49:12 crc kubenswrapper[4751]: I0227 16:49:12.866889 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cqdrk\" (UniqueName: \"kubernetes.io/projected/ad38c130-da58-4681-ac04-c017147fcc6e-kube-api-access-cqdrk\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.231807 4751 generic.go:334] "Generic (PLEG): container finished" podID="3bdb66cf-24f8-46e3-baef-dc1ef718e027" containerID="03351db41ca8520163d1679db83c712b3cbcb947ebc93b3da6924d9970f1a30b" exitCode=0 Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.231925 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3bdb66cf-24f8-46e3-baef-dc1ef718e027","Type":"ContainerDied","Data":"03351db41ca8520163d1679db83c712b3cbcb947ebc93b3da6924d9970f1a30b"} Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.236971 4751 generic.go:334] "Generic (PLEG): container finished" podID="ad38c130-da58-4681-ac04-c017147fcc6e" containerID="1112bf30f48aa7869fb6fecec4782ea6f38b6fa39e3073193c9725ad3ad85923" exitCode=2 Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.237026 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"ad38c130-da58-4681-ac04-c017147fcc6e","Type":"ContainerDied","Data":"1112bf30f48aa7869fb6fecec4782ea6f38b6fa39e3073193c9725ad3ad85923"} Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.237066 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"ad38c130-da58-4681-ac04-c017147fcc6e","Type":"ContainerDied","Data":"3a9063174388779578239d1f785ca827abbb194ec885bfb9bc94b5ba17289a0a"} Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.237096 4751 scope.go:117] "RemoveContainer" containerID="1112bf30f48aa7869fb6fecec4782ea6f38b6fa39e3073193c9725ad3ad85923" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.237278 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.278298 4751 scope.go:117] "RemoveContainer" containerID="1112bf30f48aa7869fb6fecec4782ea6f38b6fa39e3073193c9725ad3ad85923" Feb 27 16:49:13 crc kubenswrapper[4751]: E0227 16:49:13.279153 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1112bf30f48aa7869fb6fecec4782ea6f38b6fa39e3073193c9725ad3ad85923\": container with ID starting with 1112bf30f48aa7869fb6fecec4782ea6f38b6fa39e3073193c9725ad3ad85923 not found: ID does not exist" containerID="1112bf30f48aa7869fb6fecec4782ea6f38b6fa39e3073193c9725ad3ad85923" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.279201 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1112bf30f48aa7869fb6fecec4782ea6f38b6fa39e3073193c9725ad3ad85923"} err="failed to get container status \"1112bf30f48aa7869fb6fecec4782ea6f38b6fa39e3073193c9725ad3ad85923\": rpc error: code = NotFound desc = could not find container \"1112bf30f48aa7869fb6fecec4782ea6f38b6fa39e3073193c9725ad3ad85923\": container with ID starting with 1112bf30f48aa7869fb6fecec4782ea6f38b6fa39e3073193c9725ad3ad85923 not found: ID does not exist" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.288899 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.301925 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.312961 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Feb 27 16:49:13 crc kubenswrapper[4751]: E0227 16:49:13.313577 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad38c130-da58-4681-ac04-c017147fcc6e" containerName="kube-state-metrics" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.313601 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad38c130-da58-4681-ac04-c017147fcc6e" containerName="kube-state-metrics" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.313879 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad38c130-da58-4681-ac04-c017147fcc6e" containerName="kube-state-metrics" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.314840 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.326661 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.327097 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.327331 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.378308 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghl79\" (UniqueName: \"kubernetes.io/projected/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-kube-api-access-ghl79\") pod \"kube-state-metrics-0\" (UID: \"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa\") " pod="openstack/kube-state-metrics-0" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.378481 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa\") " pod="openstack/kube-state-metrics-0" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.378537 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa\") " pod="openstack/kube-state-metrics-0" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.378694 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa\") " pod="openstack/kube-state-metrics-0" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.479999 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa\") " pod="openstack/kube-state-metrics-0" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.480195 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghl79\" (UniqueName: \"kubernetes.io/projected/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-kube-api-access-ghl79\") pod \"kube-state-metrics-0\" (UID: \"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa\") " pod="openstack/kube-state-metrics-0" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.480239 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa\") " pod="openstack/kube-state-metrics-0" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.480263 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa\") " pod="openstack/kube-state-metrics-0" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.484910 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa\") " pod="openstack/kube-state-metrics-0" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.485112 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa\") " pod="openstack/kube-state-metrics-0" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.485568 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa\") " pod="openstack/kube-state-metrics-0" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.504388 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghl79\" (UniqueName: \"kubernetes.io/projected/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-kube-api-access-ghl79\") pod \"kube-state-metrics-0\" (UID: \"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa\") " pod="openstack/kube-state-metrics-0" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.570693 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.640151 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.683183 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4kxc6\" (UniqueName: \"kubernetes.io/projected/3bdb66cf-24f8-46e3-baef-dc1ef718e027-kube-api-access-4kxc6\") pod \"3bdb66cf-24f8-46e3-baef-dc1ef718e027\" (UID: \"3bdb66cf-24f8-46e3-baef-dc1ef718e027\") " Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.683374 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3bdb66cf-24f8-46e3-baef-dc1ef718e027-config-data\") pod \"3bdb66cf-24f8-46e3-baef-dc1ef718e027\" (UID: \"3bdb66cf-24f8-46e3-baef-dc1ef718e027\") " Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.683662 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bdb66cf-24f8-46e3-baef-dc1ef718e027-combined-ca-bundle\") pod \"3bdb66cf-24f8-46e3-baef-dc1ef718e027\" (UID: \"3bdb66cf-24f8-46e3-baef-dc1ef718e027\") " Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.689083 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3bdb66cf-24f8-46e3-baef-dc1ef718e027-kube-api-access-4kxc6" (OuterVolumeSpecName: "kube-api-access-4kxc6") pod "3bdb66cf-24f8-46e3-baef-dc1ef718e027" (UID: "3bdb66cf-24f8-46e3-baef-dc1ef718e027"). 
InnerVolumeSpecName "kube-api-access-4kxc6". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.711809 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3bdb66cf-24f8-46e3-baef-dc1ef718e027-config-data" (OuterVolumeSpecName: "config-data") pod "3bdb66cf-24f8-46e3-baef-dc1ef718e027" (UID: "3bdb66cf-24f8-46e3-baef-dc1ef718e027"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.715735 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3bdb66cf-24f8-46e3-baef-dc1ef718e027-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3bdb66cf-24f8-46e3-baef-dc1ef718e027" (UID: "3bdb66cf-24f8-46e3-baef-dc1ef718e027"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.786638 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bdb66cf-24f8-46e3-baef-dc1ef718e027-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.786667 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4kxc6\" (UniqueName: \"kubernetes.io/projected/3bdb66cf-24f8-46e3-baef-dc1ef718e027-kube-api-access-4kxc6\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:13 crc kubenswrapper[4751]: I0227 16:49:13.786678 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3bdb66cf-24f8-46e3-baef-dc1ef718e027-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.073163 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.073490 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4fac02e2-b5d9-4724-9373-9d4a8cfb6085" containerName="ceilometer-central-agent" containerID="cri-o://4cf7a2d9a12cef1299160c21dbfd9bc0ba7da5c15089631692d9077f273ff42b" gracePeriod=30 Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.073973 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4fac02e2-b5d9-4724-9373-9d4a8cfb6085" containerName="proxy-httpd" containerID="cri-o://06cf6289d52238a58e1c3bc175e7c6fcaf1845a19273779711aa9170eff34576" gracePeriod=30 Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.074032 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4fac02e2-b5d9-4724-9373-9d4a8cfb6085" containerName="sg-core" containerID="cri-o://a1352fe5724f9dac0d6b0b3c713cc252ccccef71c2efd3bbef442b32ecdf74de" gracePeriod=30 Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.074078 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4fac02e2-b5d9-4724-9373-9d4a8cfb6085" containerName="ceilometer-notification-agent" containerID="cri-o://30d1ed286c5e7f6acf6d94fd57787d8e3d65cfcc1658b89c52c9c210f94c3b5d" gracePeriod=30 Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.171792 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 
16:49:14.258587 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa","Type":"ContainerStarted","Data":"385584b4a41f7c4ef34d9bd57960ad21c65a50948c309a4e0d4fb7cf3f3812c6"} Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.264338 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.264949 4751 generic.go:334] "Generic (PLEG): container finished" podID="0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2" containerID="cb8104614ecfecafedc2ec0ef6e1d7ee8a8d6fae2a025fd82fad75fbcae2631e" exitCode=0 Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.265112 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2","Type":"ContainerDied","Data":"cb8104614ecfecafedc2ec0ef6e1d7ee8a8d6fae2a025fd82fad75fbcae2631e"} Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.265175 4751 scope.go:117] "RemoveContainer" containerID="cb8104614ecfecafedc2ec0ef6e1d7ee8a8d6fae2a025fd82fad75fbcae2631e" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.276860 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.276859 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3bdb66cf-24f8-46e3-baef-dc1ef718e027","Type":"ContainerDied","Data":"0072f04ddb27be64a9d020f3216d5866d0114dbbd5ba96827cccfd16677060a4"} Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.280287 4751 generic.go:334] "Generic (PLEG): container finished" podID="4fac02e2-b5d9-4724-9373-9d4a8cfb6085" containerID="a1352fe5724f9dac0d6b0b3c713cc252ccccef71c2efd3bbef442b32ecdf74de" exitCode=2 Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.280425 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4fac02e2-b5d9-4724-9373-9d4a8cfb6085","Type":"ContainerDied","Data":"a1352fe5724f9dac0d6b0b3c713cc252ccccef71c2efd3bbef442b32ecdf74de"} Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.303343 4751 scope.go:117] "RemoveContainer" containerID="c27cbe383a6f8bb61421a6d0a937ad659d75ba2ff6918a7336852656c87f2389" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.332472 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.347244 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.360181 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Feb 27 16:49:14 crc kubenswrapper[4751]: E0227 16:49:14.360674 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2" containerName="nova-api-api" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.360695 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2" containerName="nova-api-api" Feb 27 16:49:14 crc kubenswrapper[4751]: E0227 16:49:14.360720 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bdb66cf-24f8-46e3-baef-dc1ef718e027" containerName="nova-scheduler-scheduler" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.360728 4751 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="3bdb66cf-24f8-46e3-baef-dc1ef718e027" containerName="nova-scheduler-scheduler" Feb 27 16:49:14 crc kubenswrapper[4751]: E0227 16:49:14.360759 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2" containerName="nova-api-log" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.360767 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2" containerName="nova-api-log" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.367315 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2" containerName="nova-api-log" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.367375 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="3bdb66cf-24f8-46e3-baef-dc1ef718e027" containerName="nova-scheduler-scheduler" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.367418 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2" containerName="nova-api-api" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.368365 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.370743 4751 scope.go:117] "RemoveContainer" containerID="03351db41ca8520163d1679db83c712b3cbcb947ebc93b3da6924d9970f1a30b" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.372623 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.395354 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.406459 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ghplk\" (UniqueName: \"kubernetes.io/projected/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-kube-api-access-ghplk\") pod \"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2\" (UID: \"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2\") " Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.406499 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-combined-ca-bundle\") pod \"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2\" (UID: \"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2\") " Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.406777 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-config-data\") pod \"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2\" (UID: \"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2\") " Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.407160 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-logs\") pod \"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2\" (UID: \"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2\") " Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.407953 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-logs" (OuterVolumeSpecName: "logs") pod "0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2" (UID: "0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.410750 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-kube-api-access-ghplk" (OuterVolumeSpecName: "kube-api-access-ghplk") pod "0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2" (UID: "0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2"). InnerVolumeSpecName "kube-api-access-ghplk". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.410872 4751 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-logs\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.430864 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-config-data" (OuterVolumeSpecName: "config-data") pod "0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2" (UID: "0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.434262 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2" (UID: "0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.512694 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a98d10f3-fed7-4a8a-9958-883d5da83f90-config-data\") pod \"nova-scheduler-0\" (UID: \"a98d10f3-fed7-4a8a-9958-883d5da83f90\") " pod="openstack/nova-scheduler-0" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.512773 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9dt7\" (UniqueName: \"kubernetes.io/projected/a98d10f3-fed7-4a8a-9958-883d5da83f90-kube-api-access-l9dt7\") pod \"nova-scheduler-0\" (UID: \"a98d10f3-fed7-4a8a-9958-883d5da83f90\") " pod="openstack/nova-scheduler-0" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.512807 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a98d10f3-fed7-4a8a-9958-883d5da83f90-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a98d10f3-fed7-4a8a-9958-883d5da83f90\") " pod="openstack/nova-scheduler-0" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.513153 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ghplk\" (UniqueName: \"kubernetes.io/projected/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-kube-api-access-ghplk\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.513190 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.513206 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.536611 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3bdb66cf-24f8-46e3-baef-dc1ef718e027" path="/var/lib/kubelet/pods/3bdb66cf-24f8-46e3-baef-dc1ef718e027/volumes" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.537288 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad38c130-da58-4681-ac04-c017147fcc6e" path="/var/lib/kubelet/pods/ad38c130-da58-4681-ac04-c017147fcc6e/volumes" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.614885 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a98d10f3-fed7-4a8a-9958-883d5da83f90-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a98d10f3-fed7-4a8a-9958-883d5da83f90\") " pod="openstack/nova-scheduler-0" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.615050 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a98d10f3-fed7-4a8a-9958-883d5da83f90-config-data\") pod \"nova-scheduler-0\" (UID: \"a98d10f3-fed7-4a8a-9958-883d5da83f90\") " pod="openstack/nova-scheduler-0" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.615089 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9dt7\" (UniqueName: \"kubernetes.io/projected/a98d10f3-fed7-4a8a-9958-883d5da83f90-kube-api-access-l9dt7\") pod \"nova-scheduler-0\" (UID: \"a98d10f3-fed7-4a8a-9958-883d5da83f90\") " pod="openstack/nova-scheduler-0" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.620357 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a98d10f3-fed7-4a8a-9958-883d5da83f90-config-data\") pod \"nova-scheduler-0\" (UID: \"a98d10f3-fed7-4a8a-9958-883d5da83f90\") " pod="openstack/nova-scheduler-0" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.620412 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a98d10f3-fed7-4a8a-9958-883d5da83f90-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a98d10f3-fed7-4a8a-9958-883d5da83f90\") " pod="openstack/nova-scheduler-0" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.631754 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9dt7\" (UniqueName: \"kubernetes.io/projected/a98d10f3-fed7-4a8a-9958-883d5da83f90-kube-api-access-l9dt7\") pod \"nova-scheduler-0\" (UID: \"a98d10f3-fed7-4a8a-9958-883d5da83f90\") " pod="openstack/nova-scheduler-0" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.663481 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.663679 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Feb 27 16:49:14 crc kubenswrapper[4751]: I0227 16:49:14.698188 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Feb 27 16:49:15 crc kubenswrapper[4751]: W0227 16:49:15.195751 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda98d10f3_fed7_4a8a_9958_883d5da83f90.slice/crio-c04609f930b45cec0374c5ae75f4a3169cc18d66af9e5ffd7504871f1813faf2 WatchSource:0}: Error finding container c04609f930b45cec0374c5ae75f4a3169cc18d66af9e5ffd7504871f1813faf2: Status 404 returned error can't find the container with id c04609f930b45cec0374c5ae75f4a3169cc18d66af9e5ffd7504871f1813faf2 Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.196783 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.290028 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2","Type":"ContainerDied","Data":"e6889a17adea8cdfcd5b73e243618e2b40ee14506095d54fc307ff19938e8907"} Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.290067 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.291238 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a98d10f3-fed7-4a8a-9958-883d5da83f90","Type":"ContainerStarted","Data":"c04609f930b45cec0374c5ae75f4a3169cc18d66af9e5ffd7504871f1813faf2"} Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.297740 4751 generic.go:334] "Generic (PLEG): container finished" podID="4fac02e2-b5d9-4724-9373-9d4a8cfb6085" containerID="06cf6289d52238a58e1c3bc175e7c6fcaf1845a19273779711aa9170eff34576" exitCode=0 Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.297786 4751 generic.go:334] "Generic (PLEG): container finished" podID="4fac02e2-b5d9-4724-9373-9d4a8cfb6085" containerID="4cf7a2d9a12cef1299160c21dbfd9bc0ba7da5c15089631692d9077f273ff42b" exitCode=0 Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.297783 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4fac02e2-b5d9-4724-9373-9d4a8cfb6085","Type":"ContainerDied","Data":"06cf6289d52238a58e1c3bc175e7c6fcaf1845a19273779711aa9170eff34576"} Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.297865 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4fac02e2-b5d9-4724-9373-9d4a8cfb6085","Type":"ContainerDied","Data":"4cf7a2d9a12cef1299160c21dbfd9bc0ba7da5c15089631692d9077f273ff42b"} Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.300891 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa","Type":"ContainerStarted","Data":"4e7234aa1cb6ed2ff691d6f7cb4d5496e2a8b1a43a07d04951122d27b3a31bab"} Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.301130 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.315822 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.330059 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.344009 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/kube-state-metrics-0" podStartSLOduration=1.8971153630000002 podStartE2EDuration="2.343979808s" podCreationTimestamp="2026-02-27 16:49:13 +0000 UTC" firstStartedPulling="2026-02-27 16:49:14.190156335 +0000 UTC m=+1516.337170782" lastFinishedPulling="2026-02-27 16:49:14.63702078 +0000 UTC m=+1516.784035227" observedRunningTime="2026-02-27 16:49:15.318650224 +0000 UTC m=+1517.465664701" watchObservedRunningTime="2026-02-27 16:49:15.343979808 +0000 UTC m=+1517.490994305" Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.371669 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.373551 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.376993 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.381348 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.466475 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5577ca19-7f2d-495d-8a47-6799c81a2dc6-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5577ca19-7f2d-495d-8a47-6799c81a2dc6\") " pod="openstack/nova-api-0" Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.466536 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5577ca19-7f2d-495d-8a47-6799c81a2dc6-logs\") pod \"nova-api-0\" (UID: \"5577ca19-7f2d-495d-8a47-6799c81a2dc6\") " pod="openstack/nova-api-0" Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.466687 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5577ca19-7f2d-495d-8a47-6799c81a2dc6-config-data\") pod \"nova-api-0\" (UID: \"5577ca19-7f2d-495d-8a47-6799c81a2dc6\") " pod="openstack/nova-api-0" Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.466768 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlppt\" (UniqueName: \"kubernetes.io/projected/5577ca19-7f2d-495d-8a47-6799c81a2dc6-kube-api-access-dlppt\") pod \"nova-api-0\" (UID: \"5577ca19-7f2d-495d-8a47-6799c81a2dc6\") " pod="openstack/nova-api-0" Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.568818 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5577ca19-7f2d-495d-8a47-6799c81a2dc6-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5577ca19-7f2d-495d-8a47-6799c81a2dc6\") " pod="openstack/nova-api-0" Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.568908 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5577ca19-7f2d-495d-8a47-6799c81a2dc6-logs\") pod \"nova-api-0\" (UID: \"5577ca19-7f2d-495d-8a47-6799c81a2dc6\") " pod="openstack/nova-api-0" Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.569061 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5577ca19-7f2d-495d-8a47-6799c81a2dc6-config-data\") pod \"nova-api-0\" (UID: 
\"5577ca19-7f2d-495d-8a47-6799c81a2dc6\") " pod="openstack/nova-api-0" Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.569138 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dlppt\" (UniqueName: \"kubernetes.io/projected/5577ca19-7f2d-495d-8a47-6799c81a2dc6-kube-api-access-dlppt\") pod \"nova-api-0\" (UID: \"5577ca19-7f2d-495d-8a47-6799c81a2dc6\") " pod="openstack/nova-api-0" Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.572165 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5577ca19-7f2d-495d-8a47-6799c81a2dc6-logs\") pod \"nova-api-0\" (UID: \"5577ca19-7f2d-495d-8a47-6799c81a2dc6\") " pod="openstack/nova-api-0" Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.574927 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5577ca19-7f2d-495d-8a47-6799c81a2dc6-config-data\") pod \"nova-api-0\" (UID: \"5577ca19-7f2d-495d-8a47-6799c81a2dc6\") " pod="openstack/nova-api-0" Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.575496 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5577ca19-7f2d-495d-8a47-6799c81a2dc6-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5577ca19-7f2d-495d-8a47-6799c81a2dc6\") " pod="openstack/nova-api-0" Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.586571 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlppt\" (UniqueName: \"kubernetes.io/projected/5577ca19-7f2d-495d-8a47-6799c81a2dc6-kube-api-access-dlppt\") pod \"nova-api-0\" (UID: \"5577ca19-7f2d-495d-8a47-6799c81a2dc6\") " pod="openstack/nova-api-0" Feb 27 16:49:15 crc kubenswrapper[4751]: I0227 16:49:15.692593 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Feb 27 16:49:16 crc kubenswrapper[4751]: I0227 16:49:16.129267 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 27 16:49:16 crc kubenswrapper[4751]: W0227 16:49:16.142285 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5577ca19_7f2d_495d_8a47_6799c81a2dc6.slice/crio-ee28c0ee2354e2b305a342a706e77c76beaf56028ed8b9f881e0d2fd73ac3bfd WatchSource:0}: Error finding container ee28c0ee2354e2b305a342a706e77c76beaf56028ed8b9f881e0d2fd73ac3bfd: Status 404 returned error can't find the container with id ee28c0ee2354e2b305a342a706e77c76beaf56028ed8b9f881e0d2fd73ac3bfd Feb 27 16:49:16 crc kubenswrapper[4751]: I0227 16:49:16.322153 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a98d10f3-fed7-4a8a-9958-883d5da83f90","Type":"ContainerStarted","Data":"af61843773813888f996b0c5c17b45d89116b85521599b67ce20128a55a44de2"} Feb 27 16:49:16 crc kubenswrapper[4751]: I0227 16:49:16.327425 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5577ca19-7f2d-495d-8a47-6799c81a2dc6","Type":"ContainerStarted","Data":"ee28c0ee2354e2b305a342a706e77c76beaf56028ed8b9f881e0d2fd73ac3bfd"} Feb 27 16:49:16 crc kubenswrapper[4751]: I0227 16:49:16.352088 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.352070362 podStartE2EDuration="2.352070362s" podCreationTimestamp="2026-02-27 16:49:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:49:16.342339893 +0000 UTC m=+1518.489354360" watchObservedRunningTime="2026-02-27 16:49:16.352070362 +0000 UTC m=+1518.499084799" Feb 27 16:49:16 crc kubenswrapper[4751]: I0227 16:49:16.540933 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2" path="/var/lib/kubelet/pods/0f4b9484-da3d-4fa7-9c92-a64df3aa7ec2/volumes" Feb 27 16:49:17 crc kubenswrapper[4751]: I0227 16:49:17.340660 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5577ca19-7f2d-495d-8a47-6799c81a2dc6","Type":"ContainerStarted","Data":"14e2139d5148cd86da4f7ff3cbc72bad90ca2ea5c6a67c57c019a466547e8bef"} Feb 27 16:49:17 crc kubenswrapper[4751]: I0227 16:49:17.341040 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5577ca19-7f2d-495d-8a47-6799c81a2dc6","Type":"ContainerStarted","Data":"2c4d12e0fa13482aa108be33e8c7c24044ad30670fdc330b23de19f0a9847922"} Feb 27 16:49:17 crc kubenswrapper[4751]: I0227 16:49:17.364512 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.364495731 podStartE2EDuration="2.364495731s" podCreationTimestamp="2026-02-27 16:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:49:17.36143397 +0000 UTC m=+1519.508448447" watchObservedRunningTime="2026-02-27 16:49:17.364495731 +0000 UTC m=+1519.511510178" Feb 27 16:49:19 crc kubenswrapper[4751]: I0227 16:49:19.652395 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Feb 27 16:49:19 crc kubenswrapper[4751]: I0227 16:49:19.663636 4751 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Feb 27 16:49:19 crc kubenswrapper[4751]: I0227 16:49:19.663699 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Feb 27 16:49:19 crc kubenswrapper[4751]: I0227 16:49:19.699271 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Feb 27 16:49:19 crc kubenswrapper[4751]: I0227 16:49:19.935803 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.058190 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-sg-core-conf-yaml\") pod \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.058300 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-combined-ca-bundle\") pod \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.058342 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gm8t7\" (UniqueName: \"kubernetes.io/projected/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-kube-api-access-gm8t7\") pod \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.058469 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-run-httpd\") pod \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.058558 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-config-data\") pod \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.058591 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-scripts\") pod \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.058658 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-log-httpd\") pod \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\" (UID: \"4fac02e2-b5d9-4724-9373-9d4a8cfb6085\") " Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.059657 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "4fac02e2-b5d9-4724-9373-9d4a8cfb6085" (UID: "4fac02e2-b5d9-4724-9373-9d4a8cfb6085"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.060881 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "4fac02e2-b5d9-4724-9373-9d4a8cfb6085" (UID: "4fac02e2-b5d9-4724-9373-9d4a8cfb6085"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.066499 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-scripts" (OuterVolumeSpecName: "scripts") pod "4fac02e2-b5d9-4724-9373-9d4a8cfb6085" (UID: "4fac02e2-b5d9-4724-9373-9d4a8cfb6085"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.066492 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-kube-api-access-gm8t7" (OuterVolumeSpecName: "kube-api-access-gm8t7") pod "4fac02e2-b5d9-4724-9373-9d4a8cfb6085" (UID: "4fac02e2-b5d9-4724-9373-9d4a8cfb6085"). InnerVolumeSpecName "kube-api-access-gm8t7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.119692 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "4fac02e2-b5d9-4724-9373-9d4a8cfb6085" (UID: "4fac02e2-b5d9-4724-9373-9d4a8cfb6085"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.160996 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.161028 4751 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.161042 4751 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.161056 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gm8t7\" (UniqueName: \"kubernetes.io/projected/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-kube-api-access-gm8t7\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.161068 4751 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.164892 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4fac02e2-b5d9-4724-9373-9d4a8cfb6085" (UID: "4fac02e2-b5d9-4724-9373-9d4a8cfb6085"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.226676 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-config-data" (OuterVolumeSpecName: "config-data") pod "4fac02e2-b5d9-4724-9373-9d4a8cfb6085" (UID: "4fac02e2-b5d9-4724-9373-9d4a8cfb6085"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.263019 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.263053 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fac02e2-b5d9-4724-9373-9d4a8cfb6085-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.381708 4751 generic.go:334] "Generic (PLEG): container finished" podID="4fac02e2-b5d9-4724-9373-9d4a8cfb6085" containerID="30d1ed286c5e7f6acf6d94fd57787d8e3d65cfcc1658b89c52c9c210f94c3b5d" exitCode=0 Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.381785 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4fac02e2-b5d9-4724-9373-9d4a8cfb6085","Type":"ContainerDied","Data":"30d1ed286c5e7f6acf6d94fd57787d8e3d65cfcc1658b89c52c9c210f94c3b5d"} Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.381844 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4fac02e2-b5d9-4724-9373-9d4a8cfb6085","Type":"ContainerDied","Data":"0e508df2e04db5628330c13d5053447e29601d821d13385a99fdad6a2b406241"} Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.381871 4751 scope.go:117] "RemoveContainer" containerID="06cf6289d52238a58e1c3bc175e7c6fcaf1845a19273779711aa9170eff34576" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.382193 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.424262 4751 scope.go:117] "RemoveContainer" containerID="a1352fe5724f9dac0d6b0b3c713cc252ccccef71c2efd3bbef442b32ecdf74de" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.444278 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.468051 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.478717 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:49:20 crc kubenswrapper[4751]: E0227 16:49:20.479183 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fac02e2-b5d9-4724-9373-9d4a8cfb6085" containerName="sg-core" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.479202 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fac02e2-b5d9-4724-9373-9d4a8cfb6085" containerName="sg-core" Feb 27 16:49:20 crc kubenswrapper[4751]: E0227 16:49:20.479220 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fac02e2-b5d9-4724-9373-9d4a8cfb6085" containerName="ceilometer-central-agent" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.479229 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fac02e2-b5d9-4724-9373-9d4a8cfb6085" containerName="ceilometer-central-agent" Feb 27 16:49:20 crc kubenswrapper[4751]: E0227 16:49:20.479236 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fac02e2-b5d9-4724-9373-9d4a8cfb6085" containerName="ceilometer-notification-agent" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.479241 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fac02e2-b5d9-4724-9373-9d4a8cfb6085" containerName="ceilometer-notification-agent" Feb 27 16:49:20 crc kubenswrapper[4751]: E0227 16:49:20.479256 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fac02e2-b5d9-4724-9373-9d4a8cfb6085" containerName="proxy-httpd" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.479262 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fac02e2-b5d9-4724-9373-9d4a8cfb6085" containerName="proxy-httpd" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.479454 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fac02e2-b5d9-4724-9373-9d4a8cfb6085" containerName="ceilometer-notification-agent" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.479469 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fac02e2-b5d9-4724-9373-9d4a8cfb6085" containerName="sg-core" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.479475 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fac02e2-b5d9-4724-9373-9d4a8cfb6085" containerName="proxy-httpd" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.479498 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fac02e2-b5d9-4724-9373-9d4a8cfb6085" containerName="ceilometer-central-agent" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.480858 4751 scope.go:117] "RemoveContainer" containerID="30d1ed286c5e7f6acf6d94fd57787d8e3d65cfcc1658b89c52c9c210f94c3b5d" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.481177 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.483311 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.483616 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.487046 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.495254 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.540846 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4fac02e2-b5d9-4724-9373-9d4a8cfb6085" path="/var/lib/kubelet/pods/4fac02e2-b5d9-4724-9373-9d4a8cfb6085/volumes" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.552072 4751 scope.go:117] "RemoveContainer" containerID="4cf7a2d9a12cef1299160c21dbfd9bc0ba7da5c15089631692d9077f273ff42b" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.571215 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-config-data\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.571285 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-run-httpd\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.571335 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.571482 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqrrp\" (UniqueName: \"kubernetes.io/projected/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-kube-api-access-gqrrp\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.571657 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-scripts\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.571697 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-log-httpd\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.571994 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.572052 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.574175 4751 scope.go:117] "RemoveContainer" containerID="06cf6289d52238a58e1c3bc175e7c6fcaf1845a19273779711aa9170eff34576" Feb 27 16:49:20 crc kubenswrapper[4751]: E0227 16:49:20.574749 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06cf6289d52238a58e1c3bc175e7c6fcaf1845a19273779711aa9170eff34576\": container with ID starting with 06cf6289d52238a58e1c3bc175e7c6fcaf1845a19273779711aa9170eff34576 not found: ID does not exist" containerID="06cf6289d52238a58e1c3bc175e7c6fcaf1845a19273779711aa9170eff34576" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.574785 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06cf6289d52238a58e1c3bc175e7c6fcaf1845a19273779711aa9170eff34576"} err="failed to get container status \"06cf6289d52238a58e1c3bc175e7c6fcaf1845a19273779711aa9170eff34576\": rpc error: code = NotFound desc = could not find container \"06cf6289d52238a58e1c3bc175e7c6fcaf1845a19273779711aa9170eff34576\": container with ID starting with 06cf6289d52238a58e1c3bc175e7c6fcaf1845a19273779711aa9170eff34576 not found: ID does not exist" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.574814 4751 scope.go:117] "RemoveContainer" containerID="a1352fe5724f9dac0d6b0b3c713cc252ccccef71c2efd3bbef442b32ecdf74de" Feb 27 16:49:20 crc kubenswrapper[4751]: E0227 16:49:20.575798 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1352fe5724f9dac0d6b0b3c713cc252ccccef71c2efd3bbef442b32ecdf74de\": container with ID starting with a1352fe5724f9dac0d6b0b3c713cc252ccccef71c2efd3bbef442b32ecdf74de not found: ID does not exist" containerID="a1352fe5724f9dac0d6b0b3c713cc252ccccef71c2efd3bbef442b32ecdf74de" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.575830 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1352fe5724f9dac0d6b0b3c713cc252ccccef71c2efd3bbef442b32ecdf74de"} err="failed to get container status \"a1352fe5724f9dac0d6b0b3c713cc252ccccef71c2efd3bbef442b32ecdf74de\": rpc error: code = NotFound desc = could not find container \"a1352fe5724f9dac0d6b0b3c713cc252ccccef71c2efd3bbef442b32ecdf74de\": container with ID starting with a1352fe5724f9dac0d6b0b3c713cc252ccccef71c2efd3bbef442b32ecdf74de not found: ID does not exist" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.575853 4751 scope.go:117] "RemoveContainer" containerID="30d1ed286c5e7f6acf6d94fd57787d8e3d65cfcc1658b89c52c9c210f94c3b5d" Feb 27 16:49:20 crc kubenswrapper[4751]: E0227 16:49:20.576230 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30d1ed286c5e7f6acf6d94fd57787d8e3d65cfcc1658b89c52c9c210f94c3b5d\": container with ID starting with 
30d1ed286c5e7f6acf6d94fd57787d8e3d65cfcc1658b89c52c9c210f94c3b5d not found: ID does not exist" containerID="30d1ed286c5e7f6acf6d94fd57787d8e3d65cfcc1658b89c52c9c210f94c3b5d" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.576247 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30d1ed286c5e7f6acf6d94fd57787d8e3d65cfcc1658b89c52c9c210f94c3b5d"} err="failed to get container status \"30d1ed286c5e7f6acf6d94fd57787d8e3d65cfcc1658b89c52c9c210f94c3b5d\": rpc error: code = NotFound desc = could not find container \"30d1ed286c5e7f6acf6d94fd57787d8e3d65cfcc1658b89c52c9c210f94c3b5d\": container with ID starting with 30d1ed286c5e7f6acf6d94fd57787d8e3d65cfcc1658b89c52c9c210f94c3b5d not found: ID does not exist" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.576286 4751 scope.go:117] "RemoveContainer" containerID="4cf7a2d9a12cef1299160c21dbfd9bc0ba7da5c15089631692d9077f273ff42b" Feb 27 16:49:20 crc kubenswrapper[4751]: E0227 16:49:20.576523 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4cf7a2d9a12cef1299160c21dbfd9bc0ba7da5c15089631692d9077f273ff42b\": container with ID starting with 4cf7a2d9a12cef1299160c21dbfd9bc0ba7da5c15089631692d9077f273ff42b not found: ID does not exist" containerID="4cf7a2d9a12cef1299160c21dbfd9bc0ba7da5c15089631692d9077f273ff42b" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.576570 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cf7a2d9a12cef1299160c21dbfd9bc0ba7da5c15089631692d9077f273ff42b"} err="failed to get container status \"4cf7a2d9a12cef1299160c21dbfd9bc0ba7da5c15089631692d9077f273ff42b\": rpc error: code = NotFound desc = could not find container \"4cf7a2d9a12cef1299160c21dbfd9bc0ba7da5c15089631692d9077f273ff42b\": container with ID starting with 4cf7a2d9a12cef1299160c21dbfd9bc0ba7da5c15089631692d9077f273ff42b not found: ID does not exist" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.674302 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-run-httpd\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.674473 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.674636 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqrrp\" (UniqueName: \"kubernetes.io/projected/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-kube-api-access-gqrrp\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.674745 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-scripts\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.674799 4751 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-log-httpd\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.675038 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.675125 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.675160 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-config-data\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.678505 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-run-httpd\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.679493 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-log-httpd\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.681623 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-config-data\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.682658 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.684540 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="3634d5e3-a464-4b1b-91ef-bbe63f530d48" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.201:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.684794 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="3634d5e3-a464-4b1b-91ef-bbe63f530d48" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.201:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.693426 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.693803 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.693961 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-scripts\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.698063 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqrrp\" (UniqueName: \"kubernetes.io/projected/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-kube-api-access-gqrrp\") pod \"ceilometer-0\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " pod="openstack/ceilometer-0" Feb 27 16:49:20 crc kubenswrapper[4751]: I0227 16:49:20.851859 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:49:21 crc kubenswrapper[4751]: I0227 16:49:21.325973 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:49:21 crc kubenswrapper[4751]: I0227 16:49:21.390998 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83fefb2d-fb13-4128-8ac1-5d3eef1d288b","Type":"ContainerStarted","Data":"211fc100c0214159c063593b4489e61dc1e2e6be4005d2ba0a154c67e9c71700"} Feb 27 16:49:22 crc kubenswrapper[4751]: I0227 16:49:22.419374 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83fefb2d-fb13-4128-8ac1-5d3eef1d288b","Type":"ContainerStarted","Data":"35f2ef280ef65258b010585c319f267bd4e79b9be8950e9e4854cc22e831c01b"} Feb 27 16:49:22 crc kubenswrapper[4751]: I0227 16:49:22.727722 4751 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 27 16:49:23 crc kubenswrapper[4751]: I0227 16:49:23.432318 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83fefb2d-fb13-4128-8ac1-5d3eef1d288b","Type":"ContainerStarted","Data":"4de07fd9d9b372056d9eb3eb6dc900453e083b5465c83c63fc8bc0f4ce14081a"} Feb 27 16:49:23 crc kubenswrapper[4751]: I0227 16:49:23.432636 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83fefb2d-fb13-4128-8ac1-5d3eef1d288b","Type":"ContainerStarted","Data":"ddd5e8f405d047a95bfa0e410f092a30e8df2052743ab9fb4dd3b8b5fcec199e"} Feb 27 16:49:23 crc kubenswrapper[4751]: I0227 16:49:23.654794 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Feb 27 16:49:24 crc kubenswrapper[4751]: I0227 16:49:24.698724 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Feb 27 16:49:24 crc kubenswrapper[4751]: I0227 16:49:24.730612 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Feb 27 16:49:25 crc kubenswrapper[4751]: I0227 16:49:25.458288 4751 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83fefb2d-fb13-4128-8ac1-5d3eef1d288b","Type":"ContainerStarted","Data":"3582beaab9704691c0437574edf0531846ec22325d08421e4a5350d68162a728"} Feb 27 16:49:25 crc kubenswrapper[4751]: I0227 16:49:25.458863 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 27 16:49:25 crc kubenswrapper[4751]: I0227 16:49:25.498392 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Feb 27 16:49:25 crc kubenswrapper[4751]: I0227 16:49:25.499233 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.873036952 podStartE2EDuration="5.499219426s" podCreationTimestamp="2026-02-27 16:49:20 +0000 UTC" firstStartedPulling="2026-02-27 16:49:21.33537618 +0000 UTC m=+1523.482390637" lastFinishedPulling="2026-02-27 16:49:24.961558654 +0000 UTC m=+1527.108573111" observedRunningTime="2026-02-27 16:49:25.487167095 +0000 UTC m=+1527.634181552" watchObservedRunningTime="2026-02-27 16:49:25.499219426 +0000 UTC m=+1527.646233883" Feb 27 16:49:25 crc kubenswrapper[4751]: I0227 16:49:25.693304 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 27 16:49:25 crc kubenswrapper[4751]: I0227 16:49:25.693356 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 27 16:49:26 crc kubenswrapper[4751]: I0227 16:49:26.775917 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5577ca19-7f2d-495d-8a47-6799c81a2dc6" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.204:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 27 16:49:26 crc kubenswrapper[4751]: I0227 16:49:26.776266 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5577ca19-7f2d-495d-8a47-6799c81a2dc6" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.204:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 27 16:49:28 crc kubenswrapper[4751]: I0227 16:49:28.918196 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 16:49:28 crc kubenswrapper[4751]: I0227 16:49:28.918243 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:49:29 crc kubenswrapper[4751]: I0227 16:49:29.677248 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Feb 27 16:49:29 crc kubenswrapper[4751]: I0227 16:49:29.680968 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Feb 27 16:49:29 crc kubenswrapper[4751]: I0227 16:49:29.686240 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Feb 27 16:49:30 crc kubenswrapper[4751]: I0227 16:49:30.511268 4751 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.508550 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.523720 4751 generic.go:334] "Generic (PLEG): container finished" podID="1f64c674-22f4-48b5-ba58-d602b0c8a213" containerID="c37a1a7fefc6b4fbde99d73fa477165e2985a2fcc00ea69dd04bac88e3c82df1" exitCode=137 Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.523857 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.542087 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"1f64c674-22f4-48b5-ba58-d602b0c8a213","Type":"ContainerDied","Data":"c37a1a7fefc6b4fbde99d73fa477165e2985a2fcc00ea69dd04bac88e3c82df1"} Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.542149 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"1f64c674-22f4-48b5-ba58-d602b0c8a213","Type":"ContainerDied","Data":"e2637e4652b2fc035f9fe72ffdbddd1e4eb824ed6bd35d681c5c173686468b15"} Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.542171 4751 scope.go:117] "RemoveContainer" containerID="c37a1a7fefc6b4fbde99d73fa477165e2985a2fcc00ea69dd04bac88e3c82df1" Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.577541 4751 scope.go:117] "RemoveContainer" containerID="c37a1a7fefc6b4fbde99d73fa477165e2985a2fcc00ea69dd04bac88e3c82df1" Feb 27 16:49:32 crc kubenswrapper[4751]: E0227 16:49:32.577944 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c37a1a7fefc6b4fbde99d73fa477165e2985a2fcc00ea69dd04bac88e3c82df1\": container with ID starting with c37a1a7fefc6b4fbde99d73fa477165e2985a2fcc00ea69dd04bac88e3c82df1 not found: ID does not exist" containerID="c37a1a7fefc6b4fbde99d73fa477165e2985a2fcc00ea69dd04bac88e3c82df1" Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.578002 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c37a1a7fefc6b4fbde99d73fa477165e2985a2fcc00ea69dd04bac88e3c82df1"} err="failed to get container status \"c37a1a7fefc6b4fbde99d73fa477165e2985a2fcc00ea69dd04bac88e3c82df1\": rpc error: code = NotFound desc = could not find container \"c37a1a7fefc6b4fbde99d73fa477165e2985a2fcc00ea69dd04bac88e3c82df1\": container with ID starting with c37a1a7fefc6b4fbde99d73fa477165e2985a2fcc00ea69dd04bac88e3c82df1 not found: ID does not exist" Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.620758 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f64c674-22f4-48b5-ba58-d602b0c8a213-config-data\") pod \"1f64c674-22f4-48b5-ba58-d602b0c8a213\" (UID: \"1f64c674-22f4-48b5-ba58-d602b0c8a213\") " Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.620923 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6vx4j\" (UniqueName: \"kubernetes.io/projected/1f64c674-22f4-48b5-ba58-d602b0c8a213-kube-api-access-6vx4j\") pod \"1f64c674-22f4-48b5-ba58-d602b0c8a213\" (UID: \"1f64c674-22f4-48b5-ba58-d602b0c8a213\") " Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.621000 4751 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f64c674-22f4-48b5-ba58-d602b0c8a213-combined-ca-bundle\") pod \"1f64c674-22f4-48b5-ba58-d602b0c8a213\" (UID: \"1f64c674-22f4-48b5-ba58-d602b0c8a213\") " Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.626678 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f64c674-22f4-48b5-ba58-d602b0c8a213-kube-api-access-6vx4j" (OuterVolumeSpecName: "kube-api-access-6vx4j") pod "1f64c674-22f4-48b5-ba58-d602b0c8a213" (UID: "1f64c674-22f4-48b5-ba58-d602b0c8a213"). InnerVolumeSpecName "kube-api-access-6vx4j". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.654430 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f64c674-22f4-48b5-ba58-d602b0c8a213-config-data" (OuterVolumeSpecName: "config-data") pod "1f64c674-22f4-48b5-ba58-d602b0c8a213" (UID: "1f64c674-22f4-48b5-ba58-d602b0c8a213"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.656668 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f64c674-22f4-48b5-ba58-d602b0c8a213-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1f64c674-22f4-48b5-ba58-d602b0c8a213" (UID: "1f64c674-22f4-48b5-ba58-d602b0c8a213"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.723232 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f64c674-22f4-48b5-ba58-d602b0c8a213-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.723290 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6vx4j\" (UniqueName: \"kubernetes.io/projected/1f64c674-22f4-48b5-ba58-d602b0c8a213-kube-api-access-6vx4j\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.723312 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f64c674-22f4-48b5-ba58-d602b0c8a213-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.894677 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.918455 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.934758 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 27 16:49:32 crc kubenswrapper[4751]: E0227 16:49:32.935900 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f64c674-22f4-48b5-ba58-d602b0c8a213" containerName="nova-cell1-novncproxy-novncproxy" Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.935928 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f64c674-22f4-48b5-ba58-d602b0c8a213" containerName="nova-cell1-novncproxy-novncproxy" Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.936977 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f64c674-22f4-48b5-ba58-d602b0c8a213" containerName="nova-cell1-novncproxy-novncproxy" Feb 27 
16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.937727 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.947301 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.947501 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.947655 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Feb 27 16:49:32 crc kubenswrapper[4751]: I0227 16:49:32.948251 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 27 16:49:33 crc kubenswrapper[4751]: I0227 16:49:33.030122 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:33 crc kubenswrapper[4751]: I0227 16:49:33.030563 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8c5xj\" (UniqueName: \"kubernetes.io/projected/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-kube-api-access-8c5xj\") pod \"nova-cell1-novncproxy-0\" (UID: \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:33 crc kubenswrapper[4751]: I0227 16:49:33.030652 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:33 crc kubenswrapper[4751]: I0227 16:49:33.030698 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:33 crc kubenswrapper[4751]: I0227 16:49:33.030860 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:33 crc kubenswrapper[4751]: I0227 16:49:33.132388 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:33 crc kubenswrapper[4751]: I0227 16:49:33.132588 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-config-data\") pod 
\"nova-cell1-novncproxy-0\" (UID: \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:33 crc kubenswrapper[4751]: I0227 16:49:33.132655 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8c5xj\" (UniqueName: \"kubernetes.io/projected/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-kube-api-access-8c5xj\") pod \"nova-cell1-novncproxy-0\" (UID: \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:33 crc kubenswrapper[4751]: I0227 16:49:33.132706 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:33 crc kubenswrapper[4751]: I0227 16:49:33.132748 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:33 crc kubenswrapper[4751]: I0227 16:49:33.136851 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:33 crc kubenswrapper[4751]: I0227 16:49:33.137237 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:33 crc kubenswrapper[4751]: I0227 16:49:33.139480 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:33 crc kubenswrapper[4751]: I0227 16:49:33.145626 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:33 crc kubenswrapper[4751]: I0227 16:49:33.162268 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8c5xj\" (UniqueName: \"kubernetes.io/projected/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-kube-api-access-8c5xj\") pod \"nova-cell1-novncproxy-0\" (UID: \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\") " pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:33 crc kubenswrapper[4751]: I0227 16:49:33.258920 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:33 crc kubenswrapper[4751]: I0227 16:49:33.749417 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 27 16:49:33 crc kubenswrapper[4751]: W0227 16:49:33.755693 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod47bf8499_97a6_4f76_8e2e_25b3fbff1d93.slice/crio-1484e719e243a2cfef5fb369afed1455dc208194c592d425ba92642bdf7893db WatchSource:0}: Error finding container 1484e719e243a2cfef5fb369afed1455dc208194c592d425ba92642bdf7893db: Status 404 returned error can't find the container with id 1484e719e243a2cfef5fb369afed1455dc208194c592d425ba92642bdf7893db Feb 27 16:49:34 crc kubenswrapper[4751]: I0227 16:49:34.535147 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f64c674-22f4-48b5-ba58-d602b0c8a213" path="/var/lib/kubelet/pods/1f64c674-22f4-48b5-ba58-d602b0c8a213/volumes" Feb 27 16:49:34 crc kubenswrapper[4751]: I0227 16:49:34.554263 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"47bf8499-97a6-4f76-8e2e-25b3fbff1d93","Type":"ContainerStarted","Data":"916390746d00d1b65c9ea5594a62aa426093bc9d7520f7d5c66283ffee926d58"} Feb 27 16:49:34 crc kubenswrapper[4751]: I0227 16:49:34.554625 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"47bf8499-97a6-4f76-8e2e-25b3fbff1d93","Type":"ContainerStarted","Data":"1484e719e243a2cfef5fb369afed1455dc208194c592d425ba92642bdf7893db"} Feb 27 16:49:34 crc kubenswrapper[4751]: I0227 16:49:34.591873 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.591834607 podStartE2EDuration="2.591834607s" podCreationTimestamp="2026-02-27 16:49:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:49:34.575168813 +0000 UTC m=+1536.722183450" watchObservedRunningTime="2026-02-27 16:49:34.591834607 +0000 UTC m=+1536.738849084" Feb 27 16:49:35 crc kubenswrapper[4751]: I0227 16:49:35.698015 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Feb 27 16:49:35 crc kubenswrapper[4751]: I0227 16:49:35.698459 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Feb 27 16:49:35 crc kubenswrapper[4751]: I0227 16:49:35.701438 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Feb 27 16:49:35 crc kubenswrapper[4751]: I0227 16:49:35.702340 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Feb 27 16:49:36 crc kubenswrapper[4751]: I0227 16:49:36.588317 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Feb 27 16:49:36 crc kubenswrapper[4751]: I0227 16:49:36.677393 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Feb 27 16:49:36 crc kubenswrapper[4751]: I0227 16:49:36.868938 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-4xmph"] Feb 27 16:49:36 crc kubenswrapper[4751]: I0227 16:49:36.870639 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:49:36 crc kubenswrapper[4751]: I0227 16:49:36.892128 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-4xmph"] Feb 27 16:49:36 crc kubenswrapper[4751]: I0227 16:49:36.921086 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-4xmph\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:49:36 crc kubenswrapper[4751]: I0227 16:49:36.921138 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-4xmph\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:49:36 crc kubenswrapper[4751]: I0227 16:49:36.921163 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-ovsdbserver-nb\") pod \"dnsmasq-dns-89c5cd4d5-4xmph\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:49:36 crc kubenswrapper[4751]: I0227 16:49:36.921190 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rglq9\" (UniqueName: \"kubernetes.io/projected/ef097fe8-b372-4175-a5be-15fbb62905c9-kube-api-access-rglq9\") pod \"dnsmasq-dns-89c5cd4d5-4xmph\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:49:36 crc kubenswrapper[4751]: I0227 16:49:36.921307 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-4xmph\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:49:36 crc kubenswrapper[4751]: I0227 16:49:36.921496 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-config\") pod \"dnsmasq-dns-89c5cd4d5-4xmph\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:49:37 crc kubenswrapper[4751]: I0227 16:49:37.023025 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-config\") pod \"dnsmasq-dns-89c5cd4d5-4xmph\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:49:37 crc kubenswrapper[4751]: I0227 16:49:37.023118 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-4xmph\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:49:37 crc kubenswrapper[4751]: I0227 16:49:37.023152 4751 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-4xmph\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:49:37 crc kubenswrapper[4751]: I0227 16:49:37.023182 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-ovsdbserver-nb\") pod \"dnsmasq-dns-89c5cd4d5-4xmph\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:49:37 crc kubenswrapper[4751]: I0227 16:49:37.023247 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rglq9\" (UniqueName: \"kubernetes.io/projected/ef097fe8-b372-4175-a5be-15fbb62905c9-kube-api-access-rglq9\") pod \"dnsmasq-dns-89c5cd4d5-4xmph\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:49:37 crc kubenswrapper[4751]: I0227 16:49:37.023298 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-4xmph\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:49:37 crc kubenswrapper[4751]: I0227 16:49:37.024518 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-4xmph\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:49:37 crc kubenswrapper[4751]: I0227 16:49:37.024629 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-ovsdbserver-nb\") pod \"dnsmasq-dns-89c5cd4d5-4xmph\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:49:37 crc kubenswrapper[4751]: I0227 16:49:37.024701 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-4xmph\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:49:37 crc kubenswrapper[4751]: I0227 16:49:37.024980 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-config\") pod \"dnsmasq-dns-89c5cd4d5-4xmph\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:49:37 crc kubenswrapper[4751]: I0227 16:49:37.025614 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-4xmph\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:49:37 crc kubenswrapper[4751]: I0227 16:49:37.043508 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rglq9\" (UniqueName: 
\"kubernetes.io/projected/ef097fe8-b372-4175-a5be-15fbb62905c9-kube-api-access-rglq9\") pod \"dnsmasq-dns-89c5cd4d5-4xmph\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:49:37 crc kubenswrapper[4751]: I0227 16:49:37.195386 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:49:37 crc kubenswrapper[4751]: I0227 16:49:37.829600 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-4xmph"] Feb 27 16:49:38 crc kubenswrapper[4751]: I0227 16:49:38.259985 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:38 crc kubenswrapper[4751]: I0227 16:49:38.606531 4751 generic.go:334] "Generic (PLEG): container finished" podID="ef097fe8-b372-4175-a5be-15fbb62905c9" containerID="990344b7f81bfa825e7bad025dd1c2594ea1b24962516e0ebe87a7b0ebe6d249" exitCode=0 Feb 27 16:49:38 crc kubenswrapper[4751]: I0227 16:49:38.606637 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" event={"ID":"ef097fe8-b372-4175-a5be-15fbb62905c9","Type":"ContainerDied","Data":"990344b7f81bfa825e7bad025dd1c2594ea1b24962516e0ebe87a7b0ebe6d249"} Feb 27 16:49:38 crc kubenswrapper[4751]: I0227 16:49:38.606688 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" event={"ID":"ef097fe8-b372-4175-a5be-15fbb62905c9","Type":"ContainerStarted","Data":"254fbbb664fd0dce6336c71bf65cddac0792e25e5d22c657dfedad0fc2c77195"} Feb 27 16:49:39 crc kubenswrapper[4751]: I0227 16:49:39.235982 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:49:39 crc kubenswrapper[4751]: I0227 16:49:39.237168 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="83fefb2d-fb13-4128-8ac1-5d3eef1d288b" containerName="ceilometer-central-agent" containerID="cri-o://35f2ef280ef65258b010585c319f267bd4e79b9be8950e9e4854cc22e831c01b" gracePeriod=30 Feb 27 16:49:39 crc kubenswrapper[4751]: I0227 16:49:39.237247 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="83fefb2d-fb13-4128-8ac1-5d3eef1d288b" containerName="sg-core" containerID="cri-o://4de07fd9d9b372056d9eb3eb6dc900453e083b5465c83c63fc8bc0f4ce14081a" gracePeriod=30 Feb 27 16:49:39 crc kubenswrapper[4751]: I0227 16:49:39.237302 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="83fefb2d-fb13-4128-8ac1-5d3eef1d288b" containerName="ceilometer-notification-agent" containerID="cri-o://ddd5e8f405d047a95bfa0e410f092a30e8df2052743ab9fb4dd3b8b5fcec199e" gracePeriod=30 Feb 27 16:49:39 crc kubenswrapper[4751]: I0227 16:49:39.237362 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="83fefb2d-fb13-4128-8ac1-5d3eef1d288b" containerName="proxy-httpd" containerID="cri-o://3582beaab9704691c0437574edf0531846ec22325d08421e4a5350d68162a728" gracePeriod=30 Feb 27 16:49:39 crc kubenswrapper[4751]: I0227 16:49:39.342440 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="83fefb2d-fb13-4128-8ac1-5d3eef1d288b" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.205:3000/\": read tcp 10.217.0.2:60240->10.217.0.205:3000: read: connection reset by peer" Feb 27 16:49:39 crc 
kubenswrapper[4751]: I0227 16:49:39.621063 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 27 16:49:39 crc kubenswrapper[4751]: I0227 16:49:39.621716 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" event={"ID":"ef097fe8-b372-4175-a5be-15fbb62905c9","Type":"ContainerStarted","Data":"ff7df5badcf4d604fc8a4002a9fdc2e25bffaf7333ce4b9ec101b40ca09bbc07"} Feb 27 16:49:39 crc kubenswrapper[4751]: I0227 16:49:39.621758 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:49:39 crc kubenswrapper[4751]: I0227 16:49:39.624895 4751 generic.go:334] "Generic (PLEG): container finished" podID="83fefb2d-fb13-4128-8ac1-5d3eef1d288b" containerID="3582beaab9704691c0437574edf0531846ec22325d08421e4a5350d68162a728" exitCode=0 Feb 27 16:49:39 crc kubenswrapper[4751]: I0227 16:49:39.624926 4751 generic.go:334] "Generic (PLEG): container finished" podID="83fefb2d-fb13-4128-8ac1-5d3eef1d288b" containerID="4de07fd9d9b372056d9eb3eb6dc900453e083b5465c83c63fc8bc0f4ce14081a" exitCode=2 Feb 27 16:49:39 crc kubenswrapper[4751]: I0227 16:49:39.624933 4751 generic.go:334] "Generic (PLEG): container finished" podID="83fefb2d-fb13-4128-8ac1-5d3eef1d288b" containerID="35f2ef280ef65258b010585c319f267bd4e79b9be8950e9e4854cc22e831c01b" exitCode=0 Feb 27 16:49:39 crc kubenswrapper[4751]: I0227 16:49:39.625114 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="5577ca19-7f2d-495d-8a47-6799c81a2dc6" containerName="nova-api-log" containerID="cri-o://14e2139d5148cd86da4f7ff3cbc72bad90ca2ea5c6a67c57c019a466547e8bef" gracePeriod=30 Feb 27 16:49:39 crc kubenswrapper[4751]: I0227 16:49:39.625356 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83fefb2d-fb13-4128-8ac1-5d3eef1d288b","Type":"ContainerDied","Data":"3582beaab9704691c0437574edf0531846ec22325d08421e4a5350d68162a728"} Feb 27 16:49:39 crc kubenswrapper[4751]: I0227 16:49:39.625388 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83fefb2d-fb13-4128-8ac1-5d3eef1d288b","Type":"ContainerDied","Data":"4de07fd9d9b372056d9eb3eb6dc900453e083b5465c83c63fc8bc0f4ce14081a"} Feb 27 16:49:39 crc kubenswrapper[4751]: I0227 16:49:39.625397 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83fefb2d-fb13-4128-8ac1-5d3eef1d288b","Type":"ContainerDied","Data":"35f2ef280ef65258b010585c319f267bd4e79b9be8950e9e4854cc22e831c01b"} Feb 27 16:49:39 crc kubenswrapper[4751]: I0227 16:49:39.625466 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="5577ca19-7f2d-495d-8a47-6799c81a2dc6" containerName="nova-api-api" containerID="cri-o://2c4d12e0fa13482aa108be33e8c7c24044ad30670fdc330b23de19f0a9847922" gracePeriod=30 Feb 27 16:49:39 crc kubenswrapper[4751]: I0227 16:49:39.644310 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" podStartSLOduration=3.6442924359999997 podStartE2EDuration="3.644292436s" podCreationTimestamp="2026-02-27 16:49:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:49:39.640959047 +0000 UTC m=+1541.787973514" watchObservedRunningTime="2026-02-27 16:49:39.644292436 +0000 UTC m=+1541.791306883" Feb 27 16:49:40 crc 
kubenswrapper[4751]: I0227 16:49:40.137789 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.190548 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-run-httpd\") pod \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.190661 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-sg-core-conf-yaml\") pod \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.190805 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-scripts\") pod \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.190863 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-ceilometer-tls-certs\") pod \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.190979 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-log-httpd\") pod \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.191059 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqrrp\" (UniqueName: \"kubernetes.io/projected/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-kube-api-access-gqrrp\") pod \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.191089 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-config-data\") pod \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.191096 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "83fefb2d-fb13-4128-8ac1-5d3eef1d288b" (UID: "83fefb2d-fb13-4128-8ac1-5d3eef1d288b"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.191163 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-combined-ca-bundle\") pod \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\" (UID: \"83fefb2d-fb13-4128-8ac1-5d3eef1d288b\") " Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.191646 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "83fefb2d-fb13-4128-8ac1-5d3eef1d288b" (UID: "83fefb2d-fb13-4128-8ac1-5d3eef1d288b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.192254 4751 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.192278 4751 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.196992 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-scripts" (OuterVolumeSpecName: "scripts") pod "83fefb2d-fb13-4128-8ac1-5d3eef1d288b" (UID: "83fefb2d-fb13-4128-8ac1-5d3eef1d288b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.200053 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-kube-api-access-gqrrp" (OuterVolumeSpecName: "kube-api-access-gqrrp") pod "83fefb2d-fb13-4128-8ac1-5d3eef1d288b" (UID: "83fefb2d-fb13-4128-8ac1-5d3eef1d288b"). InnerVolumeSpecName "kube-api-access-gqrrp". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.230836 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "83fefb2d-fb13-4128-8ac1-5d3eef1d288b" (UID: "83fefb2d-fb13-4128-8ac1-5d3eef1d288b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.248332 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "83fefb2d-fb13-4128-8ac1-5d3eef1d288b" (UID: "83fefb2d-fb13-4128-8ac1-5d3eef1d288b"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.274183 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "83fefb2d-fb13-4128-8ac1-5d3eef1d288b" (UID: "83fefb2d-fb13-4128-8ac1-5d3eef1d288b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.294282 4751 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.294310 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.294319 4751 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.294329 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqrrp\" (UniqueName: \"kubernetes.io/projected/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-kube-api-access-gqrrp\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.294339 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.302046 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-config-data" (OuterVolumeSpecName: "config-data") pod "83fefb2d-fb13-4128-8ac1-5d3eef1d288b" (UID: "83fefb2d-fb13-4128-8ac1-5d3eef1d288b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.399116 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83fefb2d-fb13-4128-8ac1-5d3eef1d288b-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.636090 4751 generic.go:334] "Generic (PLEG): container finished" podID="5577ca19-7f2d-495d-8a47-6799c81a2dc6" containerID="14e2139d5148cd86da4f7ff3cbc72bad90ca2ea5c6a67c57c019a466547e8bef" exitCode=143 Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.636154 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5577ca19-7f2d-495d-8a47-6799c81a2dc6","Type":"ContainerDied","Data":"14e2139d5148cd86da4f7ff3cbc72bad90ca2ea5c6a67c57c019a466547e8bef"} Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.639474 4751 generic.go:334] "Generic (PLEG): container finished" podID="83fefb2d-fb13-4128-8ac1-5d3eef1d288b" containerID="ddd5e8f405d047a95bfa0e410f092a30e8df2052743ab9fb4dd3b8b5fcec199e" exitCode=0 Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.640476 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.641228 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83fefb2d-fb13-4128-8ac1-5d3eef1d288b","Type":"ContainerDied","Data":"ddd5e8f405d047a95bfa0e410f092a30e8df2052743ab9fb4dd3b8b5fcec199e"} Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.641308 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"83fefb2d-fb13-4128-8ac1-5d3eef1d288b","Type":"ContainerDied","Data":"211fc100c0214159c063593b4489e61dc1e2e6be4005d2ba0a154c67e9c71700"} Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.641334 4751 scope.go:117] "RemoveContainer" containerID="3582beaab9704691c0437574edf0531846ec22325d08421e4a5350d68162a728" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.666708 4751 scope.go:117] "RemoveContainer" containerID="4de07fd9d9b372056d9eb3eb6dc900453e083b5465c83c63fc8bc0f4ce14081a" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.668659 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.682913 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.692493 4751 scope.go:117] "RemoveContainer" containerID="ddd5e8f405d047a95bfa0e410f092a30e8df2052743ab9fb4dd3b8b5fcec199e" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.696463 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:49:40 crc kubenswrapper[4751]: E0227 16:49:40.696957 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83fefb2d-fb13-4128-8ac1-5d3eef1d288b" containerName="ceilometer-central-agent" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.696974 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="83fefb2d-fb13-4128-8ac1-5d3eef1d288b" containerName="ceilometer-central-agent" Feb 27 16:49:40 crc kubenswrapper[4751]: E0227 16:49:40.696985 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83fefb2d-fb13-4128-8ac1-5d3eef1d288b" containerName="proxy-httpd" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.696992 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="83fefb2d-fb13-4128-8ac1-5d3eef1d288b" containerName="proxy-httpd" Feb 27 16:49:40 crc kubenswrapper[4751]: E0227 16:49:40.697005 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83fefb2d-fb13-4128-8ac1-5d3eef1d288b" containerName="sg-core" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.697010 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="83fefb2d-fb13-4128-8ac1-5d3eef1d288b" containerName="sg-core" Feb 27 16:49:40 crc kubenswrapper[4751]: E0227 16:49:40.697026 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83fefb2d-fb13-4128-8ac1-5d3eef1d288b" containerName="ceilometer-notification-agent" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.697032 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="83fefb2d-fb13-4128-8ac1-5d3eef1d288b" containerName="ceilometer-notification-agent" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.710045 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="83fefb2d-fb13-4128-8ac1-5d3eef1d288b" containerName="proxy-httpd" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.710091 4751 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="83fefb2d-fb13-4128-8ac1-5d3eef1d288b" containerName="ceilometer-notification-agent" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.710108 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="83fefb2d-fb13-4128-8ac1-5d3eef1d288b" containerName="sg-core" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.710123 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="83fefb2d-fb13-4128-8ac1-5d3eef1d288b" containerName="ceilometer-central-agent" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.715871 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.715976 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.718943 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.719646 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.720498 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.732143 4751 scope.go:117] "RemoveContainer" containerID="35f2ef280ef65258b010585c319f267bd4e79b9be8950e9e4854cc22e831c01b" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.752968 4751 scope.go:117] "RemoveContainer" containerID="3582beaab9704691c0437574edf0531846ec22325d08421e4a5350d68162a728" Feb 27 16:49:40 crc kubenswrapper[4751]: E0227 16:49:40.753295 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3582beaab9704691c0437574edf0531846ec22325d08421e4a5350d68162a728\": container with ID starting with 3582beaab9704691c0437574edf0531846ec22325d08421e4a5350d68162a728 not found: ID does not exist" containerID="3582beaab9704691c0437574edf0531846ec22325d08421e4a5350d68162a728" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.753336 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3582beaab9704691c0437574edf0531846ec22325d08421e4a5350d68162a728"} err="failed to get container status \"3582beaab9704691c0437574edf0531846ec22325d08421e4a5350d68162a728\": rpc error: code = NotFound desc = could not find container \"3582beaab9704691c0437574edf0531846ec22325d08421e4a5350d68162a728\": container with ID starting with 3582beaab9704691c0437574edf0531846ec22325d08421e4a5350d68162a728 not found: ID does not exist" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.753366 4751 scope.go:117] "RemoveContainer" containerID="4de07fd9d9b372056d9eb3eb6dc900453e083b5465c83c63fc8bc0f4ce14081a" Feb 27 16:49:40 crc kubenswrapper[4751]: E0227 16:49:40.753620 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4de07fd9d9b372056d9eb3eb6dc900453e083b5465c83c63fc8bc0f4ce14081a\": container with ID starting with 4de07fd9d9b372056d9eb3eb6dc900453e083b5465c83c63fc8bc0f4ce14081a not found: ID does not exist" containerID="4de07fd9d9b372056d9eb3eb6dc900453e083b5465c83c63fc8bc0f4ce14081a" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.753644 4751 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"4de07fd9d9b372056d9eb3eb6dc900453e083b5465c83c63fc8bc0f4ce14081a"} err="failed to get container status \"4de07fd9d9b372056d9eb3eb6dc900453e083b5465c83c63fc8bc0f4ce14081a\": rpc error: code = NotFound desc = could not find container \"4de07fd9d9b372056d9eb3eb6dc900453e083b5465c83c63fc8bc0f4ce14081a\": container with ID starting with 4de07fd9d9b372056d9eb3eb6dc900453e083b5465c83c63fc8bc0f4ce14081a not found: ID does not exist" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.753659 4751 scope.go:117] "RemoveContainer" containerID="ddd5e8f405d047a95bfa0e410f092a30e8df2052743ab9fb4dd3b8b5fcec199e" Feb 27 16:49:40 crc kubenswrapper[4751]: E0227 16:49:40.753902 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ddd5e8f405d047a95bfa0e410f092a30e8df2052743ab9fb4dd3b8b5fcec199e\": container with ID starting with ddd5e8f405d047a95bfa0e410f092a30e8df2052743ab9fb4dd3b8b5fcec199e not found: ID does not exist" containerID="ddd5e8f405d047a95bfa0e410f092a30e8df2052743ab9fb4dd3b8b5fcec199e" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.753943 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ddd5e8f405d047a95bfa0e410f092a30e8df2052743ab9fb4dd3b8b5fcec199e"} err="failed to get container status \"ddd5e8f405d047a95bfa0e410f092a30e8df2052743ab9fb4dd3b8b5fcec199e\": rpc error: code = NotFound desc = could not find container \"ddd5e8f405d047a95bfa0e410f092a30e8df2052743ab9fb4dd3b8b5fcec199e\": container with ID starting with ddd5e8f405d047a95bfa0e410f092a30e8df2052743ab9fb4dd3b8b5fcec199e not found: ID does not exist" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.753972 4751 scope.go:117] "RemoveContainer" containerID="35f2ef280ef65258b010585c319f267bd4e79b9be8950e9e4854cc22e831c01b" Feb 27 16:49:40 crc kubenswrapper[4751]: E0227 16:49:40.754200 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"35f2ef280ef65258b010585c319f267bd4e79b9be8950e9e4854cc22e831c01b\": container with ID starting with 35f2ef280ef65258b010585c319f267bd4e79b9be8950e9e4854cc22e831c01b not found: ID does not exist" containerID="35f2ef280ef65258b010585c319f267bd4e79b9be8950e9e4854cc22e831c01b" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.754224 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35f2ef280ef65258b010585c319f267bd4e79b9be8950e9e4854cc22e831c01b"} err="failed to get container status \"35f2ef280ef65258b010585c319f267bd4e79b9be8950e9e4854cc22e831c01b\": rpc error: code = NotFound desc = could not find container \"35f2ef280ef65258b010585c319f267bd4e79b9be8950e9e4854cc22e831c01b\": container with ID starting with 35f2ef280ef65258b010585c319f267bd4e79b9be8950e9e4854cc22e831c01b not found: ID does not exist" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.809646 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.809712 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/be11dea2-356a-4575-be55-84817429f998-log-httpd\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.809804 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-config-data\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.809922 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.809945 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be11dea2-356a-4575-be55-84817429f998-run-httpd\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.810000 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-scripts\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.810017 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.810147 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dg88\" (UniqueName: \"kubernetes.io/projected/be11dea2-356a-4575-be55-84817429f998-kube-api-access-6dg88\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.911687 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.912210 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be11dea2-356a-4575-be55-84817429f998-run-httpd\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.912435 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-scripts\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.912579 4751 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.912819 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dg88\" (UniqueName: \"kubernetes.io/projected/be11dea2-356a-4575-be55-84817429f998-kube-api-access-6dg88\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.913072 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.913214 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be11dea2-356a-4575-be55-84817429f998-run-httpd\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.913257 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be11dea2-356a-4575-be55-84817429f998-log-httpd\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.913562 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-config-data\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.913718 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be11dea2-356a-4575-be55-84817429f998-log-httpd\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.917868 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.919114 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-config-data\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.928511 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.930385 4751 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.938103 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-scripts\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:40 crc kubenswrapper[4751]: I0227 16:49:40.940512 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dg88\" (UniqueName: \"kubernetes.io/projected/be11dea2-356a-4575-be55-84817429f998-kube-api-access-6dg88\") pod \"ceilometer-0\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " pod="openstack/ceilometer-0" Feb 27 16:49:41 crc kubenswrapper[4751]: I0227 16:49:41.034750 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:49:41 crc kubenswrapper[4751]: I0227 16:49:41.953206 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:49:41 crc kubenswrapper[4751]: W0227 16:49:41.961947 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbe11dea2_356a_4575_be55_84817429f998.slice/crio-08d1909dfce80d8c7f8d4ff069ddba7dbef2ed438e29e9c26380f6a90a4dc001 WatchSource:0}: Error finding container 08d1909dfce80d8c7f8d4ff069ddba7dbef2ed438e29e9c26380f6a90a4dc001: Status 404 returned error can't find the container with id 08d1909dfce80d8c7f8d4ff069ddba7dbef2ed438e29e9c26380f6a90a4dc001 Feb 27 16:49:42 crc kubenswrapper[4751]: I0227 16:49:42.105395 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:49:42 crc kubenswrapper[4751]: I0227 16:49:42.542189 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83fefb2d-fb13-4128-8ac1-5d3eef1d288b" path="/var/lib/kubelet/pods/83fefb2d-fb13-4128-8ac1-5d3eef1d288b/volumes" Feb 27 16:49:42 crc kubenswrapper[4751]: I0227 16:49:42.659268 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be11dea2-356a-4575-be55-84817429f998","Type":"ContainerStarted","Data":"08d1909dfce80d8c7f8d4ff069ddba7dbef2ed438e29e9c26380f6a90a4dc001"} Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.259157 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.279017 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.582993 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.676501 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dlppt\" (UniqueName: \"kubernetes.io/projected/5577ca19-7f2d-495d-8a47-6799c81a2dc6-kube-api-access-dlppt\") pod \"5577ca19-7f2d-495d-8a47-6799c81a2dc6\" (UID: \"5577ca19-7f2d-495d-8a47-6799c81a2dc6\") " Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.676617 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5577ca19-7f2d-495d-8a47-6799c81a2dc6-config-data\") pod \"5577ca19-7f2d-495d-8a47-6799c81a2dc6\" (UID: \"5577ca19-7f2d-495d-8a47-6799c81a2dc6\") " Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.676663 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5577ca19-7f2d-495d-8a47-6799c81a2dc6-logs\") pod \"5577ca19-7f2d-495d-8a47-6799c81a2dc6\" (UID: \"5577ca19-7f2d-495d-8a47-6799c81a2dc6\") " Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.676689 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5577ca19-7f2d-495d-8a47-6799c81a2dc6-combined-ca-bundle\") pod \"5577ca19-7f2d-495d-8a47-6799c81a2dc6\" (UID: \"5577ca19-7f2d-495d-8a47-6799c81a2dc6\") " Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.677160 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5577ca19-7f2d-495d-8a47-6799c81a2dc6-logs" (OuterVolumeSpecName: "logs") pod "5577ca19-7f2d-495d-8a47-6799c81a2dc6" (UID: "5577ca19-7f2d-495d-8a47-6799c81a2dc6"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.677733 4751 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5577ca19-7f2d-495d-8a47-6799c81a2dc6-logs\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.695611 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5577ca19-7f2d-495d-8a47-6799c81a2dc6-kube-api-access-dlppt" (OuterVolumeSpecName: "kube-api-access-dlppt") pod "5577ca19-7f2d-495d-8a47-6799c81a2dc6" (UID: "5577ca19-7f2d-495d-8a47-6799c81a2dc6"). InnerVolumeSpecName "kube-api-access-dlppt". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.706795 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5577ca19-7f2d-495d-8a47-6799c81a2dc6-config-data" (OuterVolumeSpecName: "config-data") pod "5577ca19-7f2d-495d-8a47-6799c81a2dc6" (UID: "5577ca19-7f2d-495d-8a47-6799c81a2dc6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.706952 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be11dea2-356a-4575-be55-84817429f998","Type":"ContainerStarted","Data":"2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b"} Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.708335 4751 generic.go:334] "Generic (PLEG): container finished" podID="5577ca19-7f2d-495d-8a47-6799c81a2dc6" containerID="2c4d12e0fa13482aa108be33e8c7c24044ad30670fdc330b23de19f0a9847922" exitCode=0 Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.712547 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.713151 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5577ca19-7f2d-495d-8a47-6799c81a2dc6","Type":"ContainerDied","Data":"2c4d12e0fa13482aa108be33e8c7c24044ad30670fdc330b23de19f0a9847922"} Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.713185 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5577ca19-7f2d-495d-8a47-6799c81a2dc6","Type":"ContainerDied","Data":"ee28c0ee2354e2b305a342a706e77c76beaf56028ed8b9f881e0d2fd73ac3bfd"} Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.713203 4751 scope.go:117] "RemoveContainer" containerID="2c4d12e0fa13482aa108be33e8c7c24044ad30670fdc330b23de19f0a9847922" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.718550 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5577ca19-7f2d-495d-8a47-6799c81a2dc6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5577ca19-7f2d-495d-8a47-6799c81a2dc6" (UID: "5577ca19-7f2d-495d-8a47-6799c81a2dc6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.733052 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.773385 4751 scope.go:117] "RemoveContainer" containerID="14e2139d5148cd86da4f7ff3cbc72bad90ca2ea5c6a67c57c019a466547e8bef" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.779860 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dlppt\" (UniqueName: \"kubernetes.io/projected/5577ca19-7f2d-495d-8a47-6799c81a2dc6-kube-api-access-dlppt\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.779886 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5577ca19-7f2d-495d-8a47-6799c81a2dc6-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.779896 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5577ca19-7f2d-495d-8a47-6799c81a2dc6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.808573 4751 scope.go:117] "RemoveContainer" containerID="2c4d12e0fa13482aa108be33e8c7c24044ad30670fdc330b23de19f0a9847922" Feb 27 16:49:43 crc kubenswrapper[4751]: E0227 16:49:43.810079 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2c4d12e0fa13482aa108be33e8c7c24044ad30670fdc330b23de19f0a9847922\": container with ID starting with 2c4d12e0fa13482aa108be33e8c7c24044ad30670fdc330b23de19f0a9847922 not found: ID does not exist" containerID="2c4d12e0fa13482aa108be33e8c7c24044ad30670fdc330b23de19f0a9847922" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.810111 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c4d12e0fa13482aa108be33e8c7c24044ad30670fdc330b23de19f0a9847922"} err="failed to get container status \"2c4d12e0fa13482aa108be33e8c7c24044ad30670fdc330b23de19f0a9847922\": rpc error: code = NotFound desc = could not find container \"2c4d12e0fa13482aa108be33e8c7c24044ad30670fdc330b23de19f0a9847922\": container with ID starting with 2c4d12e0fa13482aa108be33e8c7c24044ad30670fdc330b23de19f0a9847922 not found: ID does not exist" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.810131 4751 scope.go:117] "RemoveContainer" containerID="14e2139d5148cd86da4f7ff3cbc72bad90ca2ea5c6a67c57c019a466547e8bef" Feb 27 16:49:43 crc kubenswrapper[4751]: E0227 16:49:43.810881 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14e2139d5148cd86da4f7ff3cbc72bad90ca2ea5c6a67c57c019a466547e8bef\": container with ID starting with 14e2139d5148cd86da4f7ff3cbc72bad90ca2ea5c6a67c57c019a466547e8bef not found: ID does not exist" containerID="14e2139d5148cd86da4f7ff3cbc72bad90ca2ea5c6a67c57c019a466547e8bef" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.810905 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14e2139d5148cd86da4f7ff3cbc72bad90ca2ea5c6a67c57c019a466547e8bef"} err="failed to get container status \"14e2139d5148cd86da4f7ff3cbc72bad90ca2ea5c6a67c57c019a466547e8bef\": rpc error: code = NotFound desc = could not find container 
\"14e2139d5148cd86da4f7ff3cbc72bad90ca2ea5c6a67c57c019a466547e8bef\": container with ID starting with 14e2139d5148cd86da4f7ff3cbc72bad90ca2ea5c6a67c57c019a466547e8bef not found: ID does not exist" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.921158 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-pzgfd"] Feb 27 16:49:43 crc kubenswrapper[4751]: E0227 16:49:43.921781 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5577ca19-7f2d-495d-8a47-6799c81a2dc6" containerName="nova-api-log" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.921800 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="5577ca19-7f2d-495d-8a47-6799c81a2dc6" containerName="nova-api-log" Feb 27 16:49:43 crc kubenswrapper[4751]: E0227 16:49:43.921812 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5577ca19-7f2d-495d-8a47-6799c81a2dc6" containerName="nova-api-api" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.921819 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="5577ca19-7f2d-495d-8a47-6799c81a2dc6" containerName="nova-api-api" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.922010 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="5577ca19-7f2d-495d-8a47-6799c81a2dc6" containerName="nova-api-api" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.922023 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="5577ca19-7f2d-495d-8a47-6799c81a2dc6" containerName="nova-api-log" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.922730 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-pzgfd" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.925321 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.925510 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.932170 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-pzgfd"] Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.983314 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7gc4\" (UniqueName: \"kubernetes.io/projected/ac13e063-9022-462c-9f6c-0c73828106f8-kube-api-access-t7gc4\") pod \"nova-cell1-cell-mapping-pzgfd\" (UID: \"ac13e063-9022-462c-9f6c-0c73828106f8\") " pod="openstack/nova-cell1-cell-mapping-pzgfd" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.983431 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ac13e063-9022-462c-9f6c-0c73828106f8-scripts\") pod \"nova-cell1-cell-mapping-pzgfd\" (UID: \"ac13e063-9022-462c-9f6c-0c73828106f8\") " pod="openstack/nova-cell1-cell-mapping-pzgfd" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.983583 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac13e063-9022-462c-9f6c-0c73828106f8-config-data\") pod \"nova-cell1-cell-mapping-pzgfd\" (UID: \"ac13e063-9022-462c-9f6c-0c73828106f8\") " pod="openstack/nova-cell1-cell-mapping-pzgfd" Feb 27 16:49:43 crc kubenswrapper[4751]: I0227 16:49:43.983651 4751 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac13e063-9022-462c-9f6c-0c73828106f8-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-pzgfd\" (UID: \"ac13e063-9022-462c-9f6c-0c73828106f8\") " pod="openstack/nova-cell1-cell-mapping-pzgfd" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.043099 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.050528 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.062140 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.065110 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.067083 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.068654 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.068765 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.084636 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.084868 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpt8x\" (UniqueName: \"kubernetes.io/projected/f991a099-8443-46ee-beec-31c2ea772e32-kube-api-access-wpt8x\") pod \"nova-api-0\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " pod="openstack/nova-api-0" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.084933 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac13e063-9022-462c-9f6c-0c73828106f8-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-pzgfd\" (UID: \"ac13e063-9022-462c-9f6c-0c73828106f8\") " pod="openstack/nova-cell1-cell-mapping-pzgfd" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.084984 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-internal-tls-certs\") pod \"nova-api-0\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " pod="openstack/nova-api-0" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.085061 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " pod="openstack/nova-api-0" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.085100 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7gc4\" (UniqueName: \"kubernetes.io/projected/ac13e063-9022-462c-9f6c-0c73828106f8-kube-api-access-t7gc4\") pod \"nova-cell1-cell-mapping-pzgfd\" (UID: \"ac13e063-9022-462c-9f6c-0c73828106f8\") " pod="openstack/nova-cell1-cell-mapping-pzgfd" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 
16:49:44.085145 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f991a099-8443-46ee-beec-31c2ea772e32-logs\") pod \"nova-api-0\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " pod="openstack/nova-api-0" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.085176 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ac13e063-9022-462c-9f6c-0c73828106f8-scripts\") pod \"nova-cell1-cell-mapping-pzgfd\" (UID: \"ac13e063-9022-462c-9f6c-0c73828106f8\") " pod="openstack/nova-cell1-cell-mapping-pzgfd" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.085197 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-config-data\") pod \"nova-api-0\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " pod="openstack/nova-api-0" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.085268 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-public-tls-certs\") pod \"nova-api-0\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " pod="openstack/nova-api-0" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.085316 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac13e063-9022-462c-9f6c-0c73828106f8-config-data\") pod \"nova-cell1-cell-mapping-pzgfd\" (UID: \"ac13e063-9022-462c-9f6c-0c73828106f8\") " pod="openstack/nova-cell1-cell-mapping-pzgfd" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.089296 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac13e063-9022-462c-9f6c-0c73828106f8-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-pzgfd\" (UID: \"ac13e063-9022-462c-9f6c-0c73828106f8\") " pod="openstack/nova-cell1-cell-mapping-pzgfd" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.090989 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ac13e063-9022-462c-9f6c-0c73828106f8-scripts\") pod \"nova-cell1-cell-mapping-pzgfd\" (UID: \"ac13e063-9022-462c-9f6c-0c73828106f8\") " pod="openstack/nova-cell1-cell-mapping-pzgfd" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.092921 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac13e063-9022-462c-9f6c-0c73828106f8-config-data\") pod \"nova-cell1-cell-mapping-pzgfd\" (UID: \"ac13e063-9022-462c-9f6c-0c73828106f8\") " pod="openstack/nova-cell1-cell-mapping-pzgfd" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.112147 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7gc4\" (UniqueName: \"kubernetes.io/projected/ac13e063-9022-462c-9f6c-0c73828106f8-kube-api-access-t7gc4\") pod \"nova-cell1-cell-mapping-pzgfd\" (UID: \"ac13e063-9022-462c-9f6c-0c73828106f8\") " pod="openstack/nova-cell1-cell-mapping-pzgfd" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.187284 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-public-tls-certs\") pod \"nova-api-0\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " pod="openstack/nova-api-0" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.187372 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpt8x\" (UniqueName: \"kubernetes.io/projected/f991a099-8443-46ee-beec-31c2ea772e32-kube-api-access-wpt8x\") pod \"nova-api-0\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " pod="openstack/nova-api-0" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.187393 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-internal-tls-certs\") pod \"nova-api-0\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " pod="openstack/nova-api-0" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.187463 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " pod="openstack/nova-api-0" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.187503 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f991a099-8443-46ee-beec-31c2ea772e32-logs\") pod \"nova-api-0\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " pod="openstack/nova-api-0" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.187523 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-config-data\") pod \"nova-api-0\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " pod="openstack/nova-api-0" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.188820 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f991a099-8443-46ee-beec-31c2ea772e32-logs\") pod \"nova-api-0\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " pod="openstack/nova-api-0" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.191174 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-config-data\") pod \"nova-api-0\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " pod="openstack/nova-api-0" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.191704 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-public-tls-certs\") pod \"nova-api-0\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " pod="openstack/nova-api-0" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.195005 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-internal-tls-certs\") pod \"nova-api-0\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " pod="openstack/nova-api-0" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.202357 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-combined-ca-bundle\") pod 
\"nova-api-0\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " pod="openstack/nova-api-0" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.204873 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wpt8x\" (UniqueName: \"kubernetes.io/projected/f991a099-8443-46ee-beec-31c2ea772e32-kube-api-access-wpt8x\") pod \"nova-api-0\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " pod="openstack/nova-api-0" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.274963 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-pzgfd" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.387965 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.539954 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5577ca19-7f2d-495d-8a47-6799c81a2dc6" path="/var/lib/kubelet/pods/5577ca19-7f2d-495d-8a47-6799c81a2dc6/volumes" Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.730647 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be11dea2-356a-4575-be55-84817429f998","Type":"ContainerStarted","Data":"aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f"} Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.731202 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be11dea2-356a-4575-be55-84817429f998","Type":"ContainerStarted","Data":"870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3"} Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.784135 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-pzgfd"] Feb 27 16:49:44 crc kubenswrapper[4751]: W0227 16:49:44.785300 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podac13e063_9022_462c_9f6c_0c73828106f8.slice/crio-23c9ee0ca63b5b2823662e530b7742668d57d4737484693a21ee5f0e58b4bdb6 WatchSource:0}: Error finding container 23c9ee0ca63b5b2823662e530b7742668d57d4737484693a21ee5f0e58b4bdb6: Status 404 returned error can't find the container with id 23c9ee0ca63b5b2823662e530b7742668d57d4737484693a21ee5f0e58b4bdb6 Feb 27 16:49:44 crc kubenswrapper[4751]: I0227 16:49:44.991816 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 27 16:49:45 crc kubenswrapper[4751]: I0227 16:49:45.745843 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f991a099-8443-46ee-beec-31c2ea772e32","Type":"ContainerStarted","Data":"abfded1fcc2a250b2ce480b9ef2b6c936471c5e67e2d8d11643062679f535985"} Feb 27 16:49:45 crc kubenswrapper[4751]: I0227 16:49:45.746219 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f991a099-8443-46ee-beec-31c2ea772e32","Type":"ContainerStarted","Data":"db69de2c465eab3181bbc7f85af64fb2a0e31d20884a10f5d1d6762f601d7a18"} Feb 27 16:49:45 crc kubenswrapper[4751]: I0227 16:49:45.746230 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f991a099-8443-46ee-beec-31c2ea772e32","Type":"ContainerStarted","Data":"d692f89131146d9169c995927712eb5992142b4018d429a0db0a454002062eff"} Feb 27 16:49:45 crc kubenswrapper[4751]: I0227 16:49:45.749233 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell1-cell-mapping-pzgfd" event={"ID":"ac13e063-9022-462c-9f6c-0c73828106f8","Type":"ContainerStarted","Data":"804834f3315040e77bbf995e6e2b32631baaf772804baf713722a0098da1a0df"} Feb 27 16:49:45 crc kubenswrapper[4751]: I0227 16:49:45.749268 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-pzgfd" event={"ID":"ac13e063-9022-462c-9f6c-0c73828106f8","Type":"ContainerStarted","Data":"23c9ee0ca63b5b2823662e530b7742668d57d4737484693a21ee5f0e58b4bdb6"} Feb 27 16:49:45 crc kubenswrapper[4751]: I0227 16:49:45.771048 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=1.77102923 podStartE2EDuration="1.77102923s" podCreationTimestamp="2026-02-27 16:49:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:49:45.766661054 +0000 UTC m=+1547.913675501" watchObservedRunningTime="2026-02-27 16:49:45.77102923 +0000 UTC m=+1547.918043667" Feb 27 16:49:45 crc kubenswrapper[4751]: I0227 16:49:45.784743 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-pzgfd" podStartSLOduration=2.784727095 podStartE2EDuration="2.784727095s" podCreationTimestamp="2026-02-27 16:49:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:49:45.780702088 +0000 UTC m=+1547.927716535" watchObservedRunningTime="2026-02-27 16:49:45.784727095 +0000 UTC m=+1547.931741542" Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.197606 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.261986 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-h8q5b"] Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.262274 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" podUID="e4c57506-90e1-43e2-afff-4038aedef2b4" containerName="dnsmasq-dns" containerID="cri-o://da5651dc90032e50d819a0ffd54e4316b3c102c54d49e41a71a4d05113971c33" gracePeriod=10 Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.679456 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.759820 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6r5xd\" (UniqueName: \"kubernetes.io/projected/e4c57506-90e1-43e2-afff-4038aedef2b4-kube-api-access-6r5xd\") pod \"e4c57506-90e1-43e2-afff-4038aedef2b4\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.759915 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-ovsdbserver-sb\") pod \"e4c57506-90e1-43e2-afff-4038aedef2b4\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.759946 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-dns-svc\") pod \"e4c57506-90e1-43e2-afff-4038aedef2b4\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.760010 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-ovsdbserver-nb\") pod \"e4c57506-90e1-43e2-afff-4038aedef2b4\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.760111 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-dns-swift-storage-0\") pod \"e4c57506-90e1-43e2-afff-4038aedef2b4\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.760127 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-config\") pod \"e4c57506-90e1-43e2-afff-4038aedef2b4\" (UID: \"e4c57506-90e1-43e2-afff-4038aedef2b4\") " Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.765084 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4c57506-90e1-43e2-afff-4038aedef2b4-kube-api-access-6r5xd" (OuterVolumeSpecName: "kube-api-access-6r5xd") pod "e4c57506-90e1-43e2-afff-4038aedef2b4" (UID: "e4c57506-90e1-43e2-afff-4038aedef2b4"). InnerVolumeSpecName "kube-api-access-6r5xd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.783475 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be11dea2-356a-4575-be55-84817429f998","Type":"ContainerStarted","Data":"360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856"} Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.783653 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="be11dea2-356a-4575-be55-84817429f998" containerName="ceilometer-central-agent" containerID="cri-o://2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b" gracePeriod=30 Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.783706 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="be11dea2-356a-4575-be55-84817429f998" containerName="proxy-httpd" containerID="cri-o://360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856" gracePeriod=30 Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.783711 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.783754 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="be11dea2-356a-4575-be55-84817429f998" containerName="sg-core" containerID="cri-o://aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f" gracePeriod=30 Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.783774 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="be11dea2-356a-4575-be55-84817429f998" containerName="ceilometer-notification-agent" containerID="cri-o://870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3" gracePeriod=30 Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.797120 4751 generic.go:334] "Generic (PLEG): container finished" podID="e4c57506-90e1-43e2-afff-4038aedef2b4" containerID="da5651dc90032e50d819a0ffd54e4316b3c102c54d49e41a71a4d05113971c33" exitCode=0 Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.797199 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" event={"ID":"e4c57506-90e1-43e2-afff-4038aedef2b4","Type":"ContainerDied","Data":"da5651dc90032e50d819a0ffd54e4316b3c102c54d49e41a71a4d05113971c33"} Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.797229 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" event={"ID":"e4c57506-90e1-43e2-afff-4038aedef2b4","Type":"ContainerDied","Data":"4bb8ac2965ea3e27832499f995e8f44fa9f39689a1f9ce5a104eacd91f6952b6"} Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.797247 4751 scope.go:117] "RemoveContainer" containerID="da5651dc90032e50d819a0ffd54e4316b3c102c54d49e41a71a4d05113971c33" Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.797372 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-h8q5b" Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.824673 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.906704805 podStartE2EDuration="7.824656524s" podCreationTimestamp="2026-02-27 16:49:40 +0000 UTC" firstStartedPulling="2026-02-27 16:49:41.963931861 +0000 UTC m=+1544.110946308" lastFinishedPulling="2026-02-27 16:49:46.88188358 +0000 UTC m=+1549.028898027" observedRunningTime="2026-02-27 16:49:47.818714546 +0000 UTC m=+1549.965728993" watchObservedRunningTime="2026-02-27 16:49:47.824656524 +0000 UTC m=+1549.971670971" Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.834156 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e4c57506-90e1-43e2-afff-4038aedef2b4" (UID: "e4c57506-90e1-43e2-afff-4038aedef2b4"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.840222 4751 scope.go:117] "RemoveContainer" containerID="ecfd9155e07dd397cfc455eda01a9ead67a4d87f9814318b0df5b367d29e82d3" Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.846668 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-config" (OuterVolumeSpecName: "config") pod "e4c57506-90e1-43e2-afff-4038aedef2b4" (UID: "e4c57506-90e1-43e2-afff-4038aedef2b4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.849052 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e4c57506-90e1-43e2-afff-4038aedef2b4" (UID: "e4c57506-90e1-43e2-afff-4038aedef2b4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.859950 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e4c57506-90e1-43e2-afff-4038aedef2b4" (UID: "e4c57506-90e1-43e2-afff-4038aedef2b4"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.861146 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e4c57506-90e1-43e2-afff-4038aedef2b4" (UID: "e4c57506-90e1-43e2-afff-4038aedef2b4"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.861777 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6r5xd\" (UniqueName: \"kubernetes.io/projected/e4c57506-90e1-43e2-afff-4038aedef2b4-kube-api-access-6r5xd\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.861796 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.861806 4751 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.861817 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.861828 4751 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.861840 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4c57506-90e1-43e2-afff-4038aedef2b4-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.861925 4751 scope.go:117] "RemoveContainer" containerID="da5651dc90032e50d819a0ffd54e4316b3c102c54d49e41a71a4d05113971c33" Feb 27 16:49:47 crc kubenswrapper[4751]: E0227 16:49:47.862329 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da5651dc90032e50d819a0ffd54e4316b3c102c54d49e41a71a4d05113971c33\": container with ID starting with da5651dc90032e50d819a0ffd54e4316b3c102c54d49e41a71a4d05113971c33 not found: ID does not exist" containerID="da5651dc90032e50d819a0ffd54e4316b3c102c54d49e41a71a4d05113971c33" Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.862355 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da5651dc90032e50d819a0ffd54e4316b3c102c54d49e41a71a4d05113971c33"} err="failed to get container status \"da5651dc90032e50d819a0ffd54e4316b3c102c54d49e41a71a4d05113971c33\": rpc error: code = NotFound desc = could not find container \"da5651dc90032e50d819a0ffd54e4316b3c102c54d49e41a71a4d05113971c33\": container with ID starting with da5651dc90032e50d819a0ffd54e4316b3c102c54d49e41a71a4d05113971c33 not found: ID does not exist" Feb 27 16:49:47 crc kubenswrapper[4751]: I0227 16:49:47.862374 4751 scope.go:117] "RemoveContainer" containerID="ecfd9155e07dd397cfc455eda01a9ead67a4d87f9814318b0df5b367d29e82d3" Feb 27 16:49:47 crc kubenswrapper[4751]: E0227 16:49:47.862681 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ecfd9155e07dd397cfc455eda01a9ead67a4d87f9814318b0df5b367d29e82d3\": container with ID starting with ecfd9155e07dd397cfc455eda01a9ead67a4d87f9814318b0df5b367d29e82d3 not found: ID does not exist" containerID="ecfd9155e07dd397cfc455eda01a9ead67a4d87f9814318b0df5b367d29e82d3" Feb 27 16:49:47 
crc kubenswrapper[4751]: I0227 16:49:47.862704 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecfd9155e07dd397cfc455eda01a9ead67a4d87f9814318b0df5b367d29e82d3"} err="failed to get container status \"ecfd9155e07dd397cfc455eda01a9ead67a4d87f9814318b0df5b367d29e82d3\": rpc error: code = NotFound desc = could not find container \"ecfd9155e07dd397cfc455eda01a9ead67a4d87f9814318b0df5b367d29e82d3\": container with ID starting with ecfd9155e07dd397cfc455eda01a9ead67a4d87f9814318b0df5b367d29e82d3 not found: ID does not exist" Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.152697 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-h8q5b"] Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.161355 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-h8q5b"] Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.531588 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4c57506-90e1-43e2-afff-4038aedef2b4" path="/var/lib/kubelet/pods/e4c57506-90e1-43e2-afff-4038aedef2b4/volumes" Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.735724 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.781887 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-sg-core-conf-yaml\") pod \"be11dea2-356a-4575-be55-84817429f998\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.782085 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6dg88\" (UniqueName: \"kubernetes.io/projected/be11dea2-356a-4575-be55-84817429f998-kube-api-access-6dg88\") pod \"be11dea2-356a-4575-be55-84817429f998\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.782127 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-combined-ca-bundle\") pod \"be11dea2-356a-4575-be55-84817429f998\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.782169 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-scripts\") pod \"be11dea2-356a-4575-be55-84817429f998\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.782237 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be11dea2-356a-4575-be55-84817429f998-run-httpd\") pod \"be11dea2-356a-4575-be55-84817429f998\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.782326 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be11dea2-356a-4575-be55-84817429f998-log-httpd\") pod \"be11dea2-356a-4575-be55-84817429f998\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.782391 4751 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-ceilometer-tls-certs\") pod \"be11dea2-356a-4575-be55-84817429f998\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.782497 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-config-data\") pod \"be11dea2-356a-4575-be55-84817429f998\" (UID: \"be11dea2-356a-4575-be55-84817429f998\") " Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.782723 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be11dea2-356a-4575-be55-84817429f998-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "be11dea2-356a-4575-be55-84817429f998" (UID: "be11dea2-356a-4575-be55-84817429f998"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.782796 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be11dea2-356a-4575-be55-84817429f998-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "be11dea2-356a-4575-be55-84817429f998" (UID: "be11dea2-356a-4575-be55-84817429f998"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.783264 4751 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be11dea2-356a-4575-be55-84817429f998-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.783288 4751 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be11dea2-356a-4575-be55-84817429f998-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.793770 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-scripts" (OuterVolumeSpecName: "scripts") pod "be11dea2-356a-4575-be55-84817429f998" (UID: "be11dea2-356a-4575-be55-84817429f998"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.794806 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be11dea2-356a-4575-be55-84817429f998-kube-api-access-6dg88" (OuterVolumeSpecName: "kube-api-access-6dg88") pod "be11dea2-356a-4575-be55-84817429f998" (UID: "be11dea2-356a-4575-be55-84817429f998"). InnerVolumeSpecName "kube-api-access-6dg88". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.813792 4751 generic.go:334] "Generic (PLEG): container finished" podID="be11dea2-356a-4575-be55-84817429f998" containerID="360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856" exitCode=0 Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.813821 4751 generic.go:334] "Generic (PLEG): container finished" podID="be11dea2-356a-4575-be55-84817429f998" containerID="aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f" exitCode=2 Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.813833 4751 generic.go:334] "Generic (PLEG): container finished" podID="be11dea2-356a-4575-be55-84817429f998" containerID="870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3" exitCode=0 Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.813839 4751 generic.go:334] "Generic (PLEG): container finished" podID="be11dea2-356a-4575-be55-84817429f998" containerID="2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b" exitCode=0 Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.813875 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be11dea2-356a-4575-be55-84817429f998","Type":"ContainerDied","Data":"360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856"} Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.813902 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be11dea2-356a-4575-be55-84817429f998","Type":"ContainerDied","Data":"aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f"} Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.813913 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be11dea2-356a-4575-be55-84817429f998","Type":"ContainerDied","Data":"870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3"} Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.813924 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be11dea2-356a-4575-be55-84817429f998","Type":"ContainerDied","Data":"2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b"} Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.813933 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be11dea2-356a-4575-be55-84817429f998","Type":"ContainerDied","Data":"08d1909dfce80d8c7f8d4ff069ddba7dbef2ed438e29e9c26380f6a90a4dc001"} Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.813947 4751 scope.go:117] "RemoveContainer" containerID="360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856" Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.814043 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.841199 4751 scope.go:117] "RemoveContainer" containerID="aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f" Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.851575 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "be11dea2-356a-4575-be55-84817429f998" (UID: "be11dea2-356a-4575-be55-84817429f998"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.857379 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "be11dea2-356a-4575-be55-84817429f998" (UID: "be11dea2-356a-4575-be55-84817429f998"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.862141 4751 scope.go:117] "RemoveContainer" containerID="870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3" Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.889484 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6dg88\" (UniqueName: \"kubernetes.io/projected/be11dea2-356a-4575-be55-84817429f998-kube-api-access-6dg88\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.889524 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.889543 4751 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.889555 4751 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.891337 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "be11dea2-356a-4575-be55-84817429f998" (UID: "be11dea2-356a-4575-be55-84817429f998"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.896169 4751 scope.go:117] "RemoveContainer" containerID="2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b" Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.941863 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-config-data" (OuterVolumeSpecName: "config-data") pod "be11dea2-356a-4575-be55-84817429f998" (UID: "be11dea2-356a-4575-be55-84817429f998"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.992168 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:48 crc kubenswrapper[4751]: I0227 16:49:48.992225 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be11dea2-356a-4575-be55-84817429f998-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.017109 4751 scope.go:117] "RemoveContainer" containerID="360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856" Feb 27 16:49:49 crc kubenswrapper[4751]: E0227 16:49:49.017544 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856\": container with ID starting with 360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856 not found: ID does not exist" containerID="360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.017571 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856"} err="failed to get container status \"360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856\": rpc error: code = NotFound desc = could not find container \"360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856\": container with ID starting with 360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856 not found: ID does not exist" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.017593 4751 scope.go:117] "RemoveContainer" containerID="aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f" Feb 27 16:49:49 crc kubenswrapper[4751]: E0227 16:49:49.017883 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f\": container with ID starting with aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f not found: ID does not exist" containerID="aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.017952 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f"} err="failed to get container status \"aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f\": rpc error: code = NotFound desc = could not find container \"aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f\": container with ID starting with aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f not found: ID does not exist" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.018000 4751 scope.go:117] "RemoveContainer" containerID="870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3" Feb 27 16:49:49 crc kubenswrapper[4751]: E0227 16:49:49.018426 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3\": container with ID starting with 
870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3 not found: ID does not exist" containerID="870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.018476 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3"} err="failed to get container status \"870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3\": rpc error: code = NotFound desc = could not find container \"870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3\": container with ID starting with 870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3 not found: ID does not exist" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.018495 4751 scope.go:117] "RemoveContainer" containerID="2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b" Feb 27 16:49:49 crc kubenswrapper[4751]: E0227 16:49:49.018951 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b\": container with ID starting with 2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b not found: ID does not exist" containerID="2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.019022 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b"} err="failed to get container status \"2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b\": rpc error: code = NotFound desc = could not find container \"2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b\": container with ID starting with 2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b not found: ID does not exist" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.019056 4751 scope.go:117] "RemoveContainer" containerID="360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.019432 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856"} err="failed to get container status \"360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856\": rpc error: code = NotFound desc = could not find container \"360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856\": container with ID starting with 360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856 not found: ID does not exist" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.019462 4751 scope.go:117] "RemoveContainer" containerID="aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.019844 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f"} err="failed to get container status \"aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f\": rpc error: code = NotFound desc = could not find container \"aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f\": container with ID starting with aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f not found: ID does not exist" Feb 27 
16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.019873 4751 scope.go:117] "RemoveContainer" containerID="870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.020265 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3"} err="failed to get container status \"870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3\": rpc error: code = NotFound desc = could not find container \"870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3\": container with ID starting with 870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3 not found: ID does not exist" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.020311 4751 scope.go:117] "RemoveContainer" containerID="2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.020630 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b"} err="failed to get container status \"2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b\": rpc error: code = NotFound desc = could not find container \"2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b\": container with ID starting with 2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b not found: ID does not exist" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.020651 4751 scope.go:117] "RemoveContainer" containerID="360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.020841 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856"} err="failed to get container status \"360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856\": rpc error: code = NotFound desc = could not find container \"360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856\": container with ID starting with 360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856 not found: ID does not exist" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.020862 4751 scope.go:117] "RemoveContainer" containerID="aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.021126 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f"} err="failed to get container status \"aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f\": rpc error: code = NotFound desc = could not find container \"aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f\": container with ID starting with aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f not found: ID does not exist" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.021144 4751 scope.go:117] "RemoveContainer" containerID="870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.021314 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3"} err="failed to get container status 
\"870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3\": rpc error: code = NotFound desc = could not find container \"870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3\": container with ID starting with 870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3 not found: ID does not exist" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.021331 4751 scope.go:117] "RemoveContainer" containerID="2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.021515 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b"} err="failed to get container status \"2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b\": rpc error: code = NotFound desc = could not find container \"2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b\": container with ID starting with 2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b not found: ID does not exist" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.021533 4751 scope.go:117] "RemoveContainer" containerID="360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.021677 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856"} err="failed to get container status \"360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856\": rpc error: code = NotFound desc = could not find container \"360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856\": container with ID starting with 360c2900572a535f0927254e49fce95d9204922f7462fcea691d0501274da856 not found: ID does not exist" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.021696 4751 scope.go:117] "RemoveContainer" containerID="aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.021978 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f"} err="failed to get container status \"aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f\": rpc error: code = NotFound desc = could not find container \"aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f\": container with ID starting with aaca1d3faadab68a573758faa25e213da32cb4a91968171f94cfb69496e7932f not found: ID does not exist" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.021997 4751 scope.go:117] "RemoveContainer" containerID="870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.022234 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3"} err="failed to get container status \"870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3\": rpc error: code = NotFound desc = could not find container \"870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3\": container with ID starting with 870c412f64ad56fd65a3ffd424704e3195d8cf0b2958542e1b77282d552f4bf3 not found: ID does not exist" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.022261 4751 scope.go:117] "RemoveContainer" 
containerID="2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.022538 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b"} err="failed to get container status \"2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b\": rpc error: code = NotFound desc = could not find container \"2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b\": container with ID starting with 2faa459d3df3d8b6f2a8975d4de0de1bde74ef71bdd6cb9c5fc0a4002e0e3c8b not found: ID does not exist" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.152118 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.160329 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.194917 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:49:49 crc kubenswrapper[4751]: E0227 16:49:49.195644 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be11dea2-356a-4575-be55-84817429f998" containerName="ceilometer-central-agent" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.195685 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="be11dea2-356a-4575-be55-84817429f998" containerName="ceilometer-central-agent" Feb 27 16:49:49 crc kubenswrapper[4751]: E0227 16:49:49.195724 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4c57506-90e1-43e2-afff-4038aedef2b4" containerName="dnsmasq-dns" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.195743 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4c57506-90e1-43e2-afff-4038aedef2b4" containerName="dnsmasq-dns" Feb 27 16:49:49 crc kubenswrapper[4751]: E0227 16:49:49.195807 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be11dea2-356a-4575-be55-84817429f998" containerName="sg-core" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.195825 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="be11dea2-356a-4575-be55-84817429f998" containerName="sg-core" Feb 27 16:49:49 crc kubenswrapper[4751]: E0227 16:49:49.195855 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be11dea2-356a-4575-be55-84817429f998" containerName="proxy-httpd" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.195873 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="be11dea2-356a-4575-be55-84817429f998" containerName="proxy-httpd" Feb 27 16:49:49 crc kubenswrapper[4751]: E0227 16:49:49.195939 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be11dea2-356a-4575-be55-84817429f998" containerName="ceilometer-notification-agent" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.195966 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="be11dea2-356a-4575-be55-84817429f998" containerName="ceilometer-notification-agent" Feb 27 16:49:49 crc kubenswrapper[4751]: E0227 16:49:49.196010 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4c57506-90e1-43e2-afff-4038aedef2b4" containerName="init" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.196027 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4c57506-90e1-43e2-afff-4038aedef2b4" containerName="init" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.196452 4751 
memory_manager.go:354] "RemoveStaleState removing state" podUID="be11dea2-356a-4575-be55-84817429f998" containerName="sg-core" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.196498 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="be11dea2-356a-4575-be55-84817429f998" containerName="ceilometer-central-agent" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.196548 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="be11dea2-356a-4575-be55-84817429f998" containerName="ceilometer-notification-agent" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.196601 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="be11dea2-356a-4575-be55-84817429f998" containerName="proxy-httpd" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.196638 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4c57506-90e1-43e2-afff-4038aedef2b4" containerName="dnsmasq-dns" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.200560 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.204486 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.205112 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.214496 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.225766 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.400335 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4785321-8f3e-44cb-833c-0b78bc368cd9-run-httpd\") pod \"ceilometer-0\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.400428 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4785321-8f3e-44cb-833c-0b78bc368cd9-log-httpd\") pod \"ceilometer-0\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.400464 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-scripts\") pod \"ceilometer-0\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.400491 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.400525 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-ceilometer-tls-certs\") pod \"ceilometer-0\" 
(UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.400563 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-config-data\") pod \"ceilometer-0\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.400596 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.400706 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6czv8\" (UniqueName: \"kubernetes.io/projected/b4785321-8f3e-44cb-833c-0b78bc368cd9-kube-api-access-6czv8\") pod \"ceilometer-0\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.502515 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6czv8\" (UniqueName: \"kubernetes.io/projected/b4785321-8f3e-44cb-833c-0b78bc368cd9-kube-api-access-6czv8\") pod \"ceilometer-0\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.502646 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4785321-8f3e-44cb-833c-0b78bc368cd9-run-httpd\") pod \"ceilometer-0\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.502687 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4785321-8f3e-44cb-833c-0b78bc368cd9-log-httpd\") pod \"ceilometer-0\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.502712 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-scripts\") pod \"ceilometer-0\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.502735 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.502763 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.502801 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-config-data\") pod \"ceilometer-0\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.502832 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.503127 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4785321-8f3e-44cb-833c-0b78bc368cd9-log-httpd\") pod \"ceilometer-0\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.503286 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4785321-8f3e-44cb-833c-0b78bc368cd9-run-httpd\") pod \"ceilometer-0\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.507409 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.507393 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.509056 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-config-data\") pod \"ceilometer-0\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.509627 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.525366 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-scripts\") pod \"ceilometer-0\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.547289 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6czv8\" (UniqueName: \"kubernetes.io/projected/b4785321-8f3e-44cb-833c-0b78bc368cd9-kube-api-access-6czv8\") pod \"ceilometer-0\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.827936 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.831359 4751 generic.go:334] "Generic (PLEG): container finished" podID="ac13e063-9022-462c-9f6c-0c73828106f8" containerID="804834f3315040e77bbf995e6e2b32631baaf772804baf713722a0098da1a0df" exitCode=0 Feb 27 16:49:49 crc kubenswrapper[4751]: I0227 16:49:49.831434 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-pzgfd" event={"ID":"ac13e063-9022-462c-9f6c-0c73828106f8","Type":"ContainerDied","Data":"804834f3315040e77bbf995e6e2b32631baaf772804baf713722a0098da1a0df"} Feb 27 16:49:50 crc kubenswrapper[4751]: I0227 16:49:50.285826 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:49:50 crc kubenswrapper[4751]: W0227 16:49:50.287633 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4785321_8f3e_44cb_833c_0b78bc368cd9.slice/crio-e99657895ac4cea2b6cfd70755ad4bf2ea4e3ad10330c06620ad2d2ed0bab24c WatchSource:0}: Error finding container e99657895ac4cea2b6cfd70755ad4bf2ea4e3ad10330c06620ad2d2ed0bab24c: Status 404 returned error can't find the container with id e99657895ac4cea2b6cfd70755ad4bf2ea4e3ad10330c06620ad2d2ed0bab24c Feb 27 16:49:50 crc kubenswrapper[4751]: I0227 16:49:50.532055 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be11dea2-356a-4575-be55-84817429f998" path="/var/lib/kubelet/pods/be11dea2-356a-4575-be55-84817429f998/volumes" Feb 27 16:49:50 crc kubenswrapper[4751]: I0227 16:49:50.842308 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4785321-8f3e-44cb-833c-0b78bc368cd9","Type":"ContainerStarted","Data":"e99657895ac4cea2b6cfd70755ad4bf2ea4e3ad10330c06620ad2d2ed0bab24c"} Feb 27 16:49:51 crc kubenswrapper[4751]: I0227 16:49:51.236637 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-pzgfd" Feb 27 16:49:51 crc kubenswrapper[4751]: I0227 16:49:51.334172 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac13e063-9022-462c-9f6c-0c73828106f8-config-data\") pod \"ac13e063-9022-462c-9f6c-0c73828106f8\" (UID: \"ac13e063-9022-462c-9f6c-0c73828106f8\") " Feb 27 16:49:51 crc kubenswrapper[4751]: I0227 16:49:51.334218 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ac13e063-9022-462c-9f6c-0c73828106f8-scripts\") pod \"ac13e063-9022-462c-9f6c-0c73828106f8\" (UID: \"ac13e063-9022-462c-9f6c-0c73828106f8\") " Feb 27 16:49:51 crc kubenswrapper[4751]: I0227 16:49:51.334294 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7gc4\" (UniqueName: \"kubernetes.io/projected/ac13e063-9022-462c-9f6c-0c73828106f8-kube-api-access-t7gc4\") pod \"ac13e063-9022-462c-9f6c-0c73828106f8\" (UID: \"ac13e063-9022-462c-9f6c-0c73828106f8\") " Feb 27 16:49:51 crc kubenswrapper[4751]: I0227 16:49:51.334325 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac13e063-9022-462c-9f6c-0c73828106f8-combined-ca-bundle\") pod \"ac13e063-9022-462c-9f6c-0c73828106f8\" (UID: \"ac13e063-9022-462c-9f6c-0c73828106f8\") " Feb 27 16:49:51 crc kubenswrapper[4751]: I0227 16:49:51.352777 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac13e063-9022-462c-9f6c-0c73828106f8-scripts" (OuterVolumeSpecName: "scripts") pod "ac13e063-9022-462c-9f6c-0c73828106f8" (UID: "ac13e063-9022-462c-9f6c-0c73828106f8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:51 crc kubenswrapper[4751]: I0227 16:49:51.352893 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac13e063-9022-462c-9f6c-0c73828106f8-kube-api-access-t7gc4" (OuterVolumeSpecName: "kube-api-access-t7gc4") pod "ac13e063-9022-462c-9f6c-0c73828106f8" (UID: "ac13e063-9022-462c-9f6c-0c73828106f8"). InnerVolumeSpecName "kube-api-access-t7gc4". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:49:51 crc kubenswrapper[4751]: I0227 16:49:51.364181 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac13e063-9022-462c-9f6c-0c73828106f8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ac13e063-9022-462c-9f6c-0c73828106f8" (UID: "ac13e063-9022-462c-9f6c-0c73828106f8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:51 crc kubenswrapper[4751]: I0227 16:49:51.367640 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac13e063-9022-462c-9f6c-0c73828106f8-config-data" (OuterVolumeSpecName: "config-data") pod "ac13e063-9022-462c-9f6c-0c73828106f8" (UID: "ac13e063-9022-462c-9f6c-0c73828106f8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:51 crc kubenswrapper[4751]: I0227 16:49:51.436931 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac13e063-9022-462c-9f6c-0c73828106f8-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:51 crc kubenswrapper[4751]: I0227 16:49:51.436986 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ac13e063-9022-462c-9f6c-0c73828106f8-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:51 crc kubenswrapper[4751]: I0227 16:49:51.437007 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t7gc4\" (UniqueName: \"kubernetes.io/projected/ac13e063-9022-462c-9f6c-0c73828106f8-kube-api-access-t7gc4\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:51 crc kubenswrapper[4751]: I0227 16:49:51.437026 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac13e063-9022-462c-9f6c-0c73828106f8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:51 crc kubenswrapper[4751]: I0227 16:49:51.858896 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4785321-8f3e-44cb-833c-0b78bc368cd9","Type":"ContainerStarted","Data":"a9ee4f7f4be2929eae47c5cf12c06d5e1590f223ebab7558c7fbef22ddb4ca6f"} Feb 27 16:49:51 crc kubenswrapper[4751]: I0227 16:49:51.858937 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4785321-8f3e-44cb-833c-0b78bc368cd9","Type":"ContainerStarted","Data":"5ea526479c46d824bbf94a208fd6d3670757ee20a011265cfd59b145eb86cf1e"} Feb 27 16:49:51 crc kubenswrapper[4751]: I0227 16:49:51.861168 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-pzgfd" event={"ID":"ac13e063-9022-462c-9f6c-0c73828106f8","Type":"ContainerDied","Data":"23c9ee0ca63b5b2823662e530b7742668d57d4737484693a21ee5f0e58b4bdb6"} Feb 27 16:49:51 crc kubenswrapper[4751]: I0227 16:49:51.861189 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="23c9ee0ca63b5b2823662e530b7742668d57d4737484693a21ee5f0e58b4bdb6" Feb 27 16:49:51 crc kubenswrapper[4751]: I0227 16:49:51.861254 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-pzgfd" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.036558 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.041385 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="f991a099-8443-46ee-beec-31c2ea772e32" containerName="nova-api-log" containerID="cri-o://db69de2c465eab3181bbc7f85af64fb2a0e31d20884a10f5d1d6762f601d7a18" gracePeriod=30 Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.041452 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="f991a099-8443-46ee-beec-31c2ea772e32" containerName="nova-api-api" containerID="cri-o://abfded1fcc2a250b2ce480b9ef2b6c936471c5e67e2d8d11643062679f535985" gracePeriod=30 Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.051694 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.051934 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="a98d10f3-fed7-4a8a-9958-883d5da83f90" containerName="nova-scheduler-scheduler" containerID="cri-o://af61843773813888f996b0c5c17b45d89116b85521599b67ce20128a55a44de2" gracePeriod=30 Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.077137 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.077474 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="3634d5e3-a464-4b1b-91ef-bbe63f530d48" containerName="nova-metadata-log" containerID="cri-o://6a4f2712c616d4410a3a9cead957bdcd07c42628edcfe9b34a4767d6b46ec9f8" gracePeriod=30 Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.077675 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="3634d5e3-a464-4b1b-91ef-bbe63f530d48" containerName="nova-metadata-metadata" containerID="cri-o://57e845be3c5f09f94c9bf0015590d42555fe5d4051a56aa33f23bb8c7e32c4c4" gracePeriod=30 Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.491056 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.678751 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-config-data\") pod \"f991a099-8443-46ee-beec-31c2ea772e32\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.678800 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-combined-ca-bundle\") pod \"f991a099-8443-46ee-beec-31c2ea772e32\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.678845 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f991a099-8443-46ee-beec-31c2ea772e32-logs\") pod \"f991a099-8443-46ee-beec-31c2ea772e32\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.678903 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-public-tls-certs\") pod \"f991a099-8443-46ee-beec-31c2ea772e32\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.678951 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-internal-tls-certs\") pod \"f991a099-8443-46ee-beec-31c2ea772e32\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.678979 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wpt8x\" (UniqueName: \"kubernetes.io/projected/f991a099-8443-46ee-beec-31c2ea772e32-kube-api-access-wpt8x\") pod \"f991a099-8443-46ee-beec-31c2ea772e32\" (UID: \"f991a099-8443-46ee-beec-31c2ea772e32\") " Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.680780 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f991a099-8443-46ee-beec-31c2ea772e32-logs" (OuterVolumeSpecName: "logs") pod "f991a099-8443-46ee-beec-31c2ea772e32" (UID: "f991a099-8443-46ee-beec-31c2ea772e32"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.684697 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f991a099-8443-46ee-beec-31c2ea772e32-kube-api-access-wpt8x" (OuterVolumeSpecName: "kube-api-access-wpt8x") pod "f991a099-8443-46ee-beec-31c2ea772e32" (UID: "f991a099-8443-46ee-beec-31c2ea772e32"). InnerVolumeSpecName "kube-api-access-wpt8x". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.722299 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-config-data" (OuterVolumeSpecName: "config-data") pod "f991a099-8443-46ee-beec-31c2ea772e32" (UID: "f991a099-8443-46ee-beec-31c2ea772e32"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.723701 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f991a099-8443-46ee-beec-31c2ea772e32" (UID: "f991a099-8443-46ee-beec-31c2ea772e32"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.746139 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "f991a099-8443-46ee-beec-31c2ea772e32" (UID: "f991a099-8443-46ee-beec-31c2ea772e32"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.754802 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "f991a099-8443-46ee-beec-31c2ea772e32" (UID: "f991a099-8443-46ee-beec-31c2ea772e32"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.780872 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.780899 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.780910 4751 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f991a099-8443-46ee-beec-31c2ea772e32-logs\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.780920 4751 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.780929 4751 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f991a099-8443-46ee-beec-31c2ea772e32-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.780938 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wpt8x\" (UniqueName: \"kubernetes.io/projected/f991a099-8443-46ee-beec-31c2ea772e32-kube-api-access-wpt8x\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.875194 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4785321-8f3e-44cb-833c-0b78bc368cd9","Type":"ContainerStarted","Data":"62ed64131f674628306788d24cfc85250f4581f979fadacc28ddf528b64bebfd"} Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.878504 4751 generic.go:334] "Generic (PLEG): container finished" podID="3634d5e3-a464-4b1b-91ef-bbe63f530d48" containerID="6a4f2712c616d4410a3a9cead957bdcd07c42628edcfe9b34a4767d6b46ec9f8" exitCode=143 
Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.878595 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3634d5e3-a464-4b1b-91ef-bbe63f530d48","Type":"ContainerDied","Data":"6a4f2712c616d4410a3a9cead957bdcd07c42628edcfe9b34a4767d6b46ec9f8"} Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.881610 4751 generic.go:334] "Generic (PLEG): container finished" podID="f991a099-8443-46ee-beec-31c2ea772e32" containerID="abfded1fcc2a250b2ce480b9ef2b6c936471c5e67e2d8d11643062679f535985" exitCode=0 Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.881673 4751 generic.go:334] "Generic (PLEG): container finished" podID="f991a099-8443-46ee-beec-31c2ea772e32" containerID="db69de2c465eab3181bbc7f85af64fb2a0e31d20884a10f5d1d6762f601d7a18" exitCode=143 Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.881711 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f991a099-8443-46ee-beec-31c2ea772e32","Type":"ContainerDied","Data":"abfded1fcc2a250b2ce480b9ef2b6c936471c5e67e2d8d11643062679f535985"} Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.881762 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f991a099-8443-46ee-beec-31c2ea772e32","Type":"ContainerDied","Data":"db69de2c465eab3181bbc7f85af64fb2a0e31d20884a10f5d1d6762f601d7a18"} Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.881777 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f991a099-8443-46ee-beec-31c2ea772e32","Type":"ContainerDied","Data":"d692f89131146d9169c995927712eb5992142b4018d429a0db0a454002062eff"} Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.881803 4751 scope.go:117] "RemoveContainer" containerID="abfded1fcc2a250b2ce480b9ef2b6c936471c5e67e2d8d11643062679f535985" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.882087 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.924154 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.929071 4751 scope.go:117] "RemoveContainer" containerID="db69de2c465eab3181bbc7f85af64fb2a0e31d20884a10f5d1d6762f601d7a18" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.955843 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.959147 4751 scope.go:117] "RemoveContainer" containerID="abfded1fcc2a250b2ce480b9ef2b6c936471c5e67e2d8d11643062679f535985" Feb 27 16:49:52 crc kubenswrapper[4751]: E0227 16:49:52.960049 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"abfded1fcc2a250b2ce480b9ef2b6c936471c5e67e2d8d11643062679f535985\": container with ID starting with abfded1fcc2a250b2ce480b9ef2b6c936471c5e67e2d8d11643062679f535985 not found: ID does not exist" containerID="abfded1fcc2a250b2ce480b9ef2b6c936471c5e67e2d8d11643062679f535985" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.960140 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"abfded1fcc2a250b2ce480b9ef2b6c936471c5e67e2d8d11643062679f535985"} err="failed to get container status \"abfded1fcc2a250b2ce480b9ef2b6c936471c5e67e2d8d11643062679f535985\": rpc error: code = NotFound desc = could not find container \"abfded1fcc2a250b2ce480b9ef2b6c936471c5e67e2d8d11643062679f535985\": container with ID starting with abfded1fcc2a250b2ce480b9ef2b6c936471c5e67e2d8d11643062679f535985 not found: ID does not exist" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.960225 4751 scope.go:117] "RemoveContainer" containerID="db69de2c465eab3181bbc7f85af64fb2a0e31d20884a10f5d1d6762f601d7a18" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.964500 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Feb 27 16:49:52 crc kubenswrapper[4751]: E0227 16:49:52.965021 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f991a099-8443-46ee-beec-31c2ea772e32" containerName="nova-api-api" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.965040 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="f991a099-8443-46ee-beec-31c2ea772e32" containerName="nova-api-api" Feb 27 16:49:52 crc kubenswrapper[4751]: E0227 16:49:52.965056 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac13e063-9022-462c-9f6c-0c73828106f8" containerName="nova-manage" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.965064 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac13e063-9022-462c-9f6c-0c73828106f8" containerName="nova-manage" Feb 27 16:49:52 crc kubenswrapper[4751]: E0227 16:49:52.965102 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f991a099-8443-46ee-beec-31c2ea772e32" containerName="nova-api-log" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.965111 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="f991a099-8443-46ee-beec-31c2ea772e32" containerName="nova-api-log" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.965313 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac13e063-9022-462c-9f6c-0c73828106f8" containerName="nova-manage" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.965337 4751 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="f991a099-8443-46ee-beec-31c2ea772e32" containerName="nova-api-api" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.965357 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="f991a099-8443-46ee-beec-31c2ea772e32" containerName="nova-api-log" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.966536 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.969709 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.969975 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.970163 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.973150 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 27 16:49:52 crc kubenswrapper[4751]: E0227 16:49:52.976115 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db69de2c465eab3181bbc7f85af64fb2a0e31d20884a10f5d1d6762f601d7a18\": container with ID starting with db69de2c465eab3181bbc7f85af64fb2a0e31d20884a10f5d1d6762f601d7a18 not found: ID does not exist" containerID="db69de2c465eab3181bbc7f85af64fb2a0e31d20884a10f5d1d6762f601d7a18" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.976252 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db69de2c465eab3181bbc7f85af64fb2a0e31d20884a10f5d1d6762f601d7a18"} err="failed to get container status \"db69de2c465eab3181bbc7f85af64fb2a0e31d20884a10f5d1d6762f601d7a18\": rpc error: code = NotFound desc = could not find container \"db69de2c465eab3181bbc7f85af64fb2a0e31d20884a10f5d1d6762f601d7a18\": container with ID starting with db69de2c465eab3181bbc7f85af64fb2a0e31d20884a10f5d1d6762f601d7a18 not found: ID does not exist" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.976356 4751 scope.go:117] "RemoveContainer" containerID="abfded1fcc2a250b2ce480b9ef2b6c936471c5e67e2d8d11643062679f535985" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.977245 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"abfded1fcc2a250b2ce480b9ef2b6c936471c5e67e2d8d11643062679f535985"} err="failed to get container status \"abfded1fcc2a250b2ce480b9ef2b6c936471c5e67e2d8d11643062679f535985\": rpc error: code = NotFound desc = could not find container \"abfded1fcc2a250b2ce480b9ef2b6c936471c5e67e2d8d11643062679f535985\": container with ID starting with abfded1fcc2a250b2ce480b9ef2b6c936471c5e67e2d8d11643062679f535985 not found: ID does not exist" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.977310 4751 scope.go:117] "RemoveContainer" containerID="db69de2c465eab3181bbc7f85af64fb2a0e31d20884a10f5d1d6762f601d7a18" Feb 27 16:49:52 crc kubenswrapper[4751]: I0227 16:49:52.979342 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db69de2c465eab3181bbc7f85af64fb2a0e31d20884a10f5d1d6762f601d7a18"} err="failed to get container status \"db69de2c465eab3181bbc7f85af64fb2a0e31d20884a10f5d1d6762f601d7a18\": rpc error: code = NotFound desc = could not find container 
\"db69de2c465eab3181bbc7f85af64fb2a0e31d20884a10f5d1d6762f601d7a18\": container with ID starting with db69de2c465eab3181bbc7f85af64fb2a0e31d20884a10f5d1d6762f601d7a18 not found: ID does not exist" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.085174 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6zwb\" (UniqueName: \"kubernetes.io/projected/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-kube-api-access-f6zwb\") pod \"nova-api-0\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " pod="openstack/nova-api-0" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.085557 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-config-data\") pod \"nova-api-0\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " pod="openstack/nova-api-0" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.085676 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-logs\") pod \"nova-api-0\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " pod="openstack/nova-api-0" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.085854 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-public-tls-certs\") pod \"nova-api-0\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " pod="openstack/nova-api-0" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.085963 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-internal-tls-certs\") pod \"nova-api-0\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " pod="openstack/nova-api-0" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.086059 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " pod="openstack/nova-api-0" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.188036 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " pod="openstack/nova-api-0" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.188142 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6zwb\" (UniqueName: \"kubernetes.io/projected/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-kube-api-access-f6zwb\") pod \"nova-api-0\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " pod="openstack/nova-api-0" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.188211 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-config-data\") pod \"nova-api-0\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " pod="openstack/nova-api-0" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 
16:49:53.188241 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-logs\") pod \"nova-api-0\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " pod="openstack/nova-api-0" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.188345 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-public-tls-certs\") pod \"nova-api-0\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " pod="openstack/nova-api-0" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.188385 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-internal-tls-certs\") pod \"nova-api-0\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " pod="openstack/nova-api-0" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.188900 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-logs\") pod \"nova-api-0\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " pod="openstack/nova-api-0" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.193058 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-public-tls-certs\") pod \"nova-api-0\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " pod="openstack/nova-api-0" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.193086 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " pod="openstack/nova-api-0" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.193426 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-internal-tls-certs\") pod \"nova-api-0\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " pod="openstack/nova-api-0" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.193791 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-config-data\") pod \"nova-api-0\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " pod="openstack/nova-api-0" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.208020 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6zwb\" (UniqueName: \"kubernetes.io/projected/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-kube-api-access-f6zwb\") pod \"nova-api-0\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " pod="openstack/nova-api-0" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.284112 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.588851 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.696653 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l9dt7\" (UniqueName: \"kubernetes.io/projected/a98d10f3-fed7-4a8a-9958-883d5da83f90-kube-api-access-l9dt7\") pod \"a98d10f3-fed7-4a8a-9958-883d5da83f90\" (UID: \"a98d10f3-fed7-4a8a-9958-883d5da83f90\") " Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.696829 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a98d10f3-fed7-4a8a-9958-883d5da83f90-config-data\") pod \"a98d10f3-fed7-4a8a-9958-883d5da83f90\" (UID: \"a98d10f3-fed7-4a8a-9958-883d5da83f90\") " Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.696885 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a98d10f3-fed7-4a8a-9958-883d5da83f90-combined-ca-bundle\") pod \"a98d10f3-fed7-4a8a-9958-883d5da83f90\" (UID: \"a98d10f3-fed7-4a8a-9958-883d5da83f90\") " Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.706333 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a98d10f3-fed7-4a8a-9958-883d5da83f90-kube-api-access-l9dt7" (OuterVolumeSpecName: "kube-api-access-l9dt7") pod "a98d10f3-fed7-4a8a-9958-883d5da83f90" (UID: "a98d10f3-fed7-4a8a-9958-883d5da83f90"). InnerVolumeSpecName "kube-api-access-l9dt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.728059 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a98d10f3-fed7-4a8a-9958-883d5da83f90-config-data" (OuterVolumeSpecName: "config-data") pod "a98d10f3-fed7-4a8a-9958-883d5da83f90" (UID: "a98d10f3-fed7-4a8a-9958-883d5da83f90"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.729682 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a98d10f3-fed7-4a8a-9958-883d5da83f90-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a98d10f3-fed7-4a8a-9958-883d5da83f90" (UID: "a98d10f3-fed7-4a8a-9958-883d5da83f90"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.800168 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l9dt7\" (UniqueName: \"kubernetes.io/projected/a98d10f3-fed7-4a8a-9958-883d5da83f90-kube-api-access-l9dt7\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.800208 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a98d10f3-fed7-4a8a-9958-883d5da83f90-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.800220 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a98d10f3-fed7-4a8a-9958-883d5da83f90-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.846282 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 27 16:49:53 crc kubenswrapper[4751]: W0227 16:49:53.855075 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1a8f14c4_f8bc_4247_b2a2_72aa4801adfa.slice/crio-b2e6d637a45de991813b9f92b6686d3bb835f980ea0aed40e243770b76f696e4 WatchSource:0}: Error finding container b2e6d637a45de991813b9f92b6686d3bb835f980ea0aed40e243770b76f696e4: Status 404 returned error can't find the container with id b2e6d637a45de991813b9f92b6686d3bb835f980ea0aed40e243770b76f696e4 Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.891854 4751 generic.go:334] "Generic (PLEG): container finished" podID="a98d10f3-fed7-4a8a-9958-883d5da83f90" containerID="af61843773813888f996b0c5c17b45d89116b85521599b67ce20128a55a44de2" exitCode=0 Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.892036 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a98d10f3-fed7-4a8a-9958-883d5da83f90","Type":"ContainerDied","Data":"af61843773813888f996b0c5c17b45d89116b85521599b67ce20128a55a44de2"} Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.892247 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a98d10f3-fed7-4a8a-9958-883d5da83f90","Type":"ContainerDied","Data":"c04609f930b45cec0374c5ae75f4a3169cc18d66af9e5ffd7504871f1813faf2"} Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.892109 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.892298 4751 scope.go:117] "RemoveContainer" containerID="af61843773813888f996b0c5c17b45d89116b85521599b67ce20128a55a44de2" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.895461 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa","Type":"ContainerStarted","Data":"b2e6d637a45de991813b9f92b6686d3bb835f980ea0aed40e243770b76f696e4"} Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.929132 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.929844 4751 scope.go:117] "RemoveContainer" containerID="af61843773813888f996b0c5c17b45d89116b85521599b67ce20128a55a44de2" Feb 27 16:49:53 crc kubenswrapper[4751]: E0227 16:49:53.931075 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af61843773813888f996b0c5c17b45d89116b85521599b67ce20128a55a44de2\": container with ID starting with af61843773813888f996b0c5c17b45d89116b85521599b67ce20128a55a44de2 not found: ID does not exist" containerID="af61843773813888f996b0c5c17b45d89116b85521599b67ce20128a55a44de2" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.931108 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af61843773813888f996b0c5c17b45d89116b85521599b67ce20128a55a44de2"} err="failed to get container status \"af61843773813888f996b0c5c17b45d89116b85521599b67ce20128a55a44de2\": rpc error: code = NotFound desc = could not find container \"af61843773813888f996b0c5c17b45d89116b85521599b67ce20128a55a44de2\": container with ID starting with af61843773813888f996b0c5c17b45d89116b85521599b67ce20128a55a44de2 not found: ID does not exist" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.946035 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.962067 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Feb 27 16:49:53 crc kubenswrapper[4751]: E0227 16:49:53.962638 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a98d10f3-fed7-4a8a-9958-883d5da83f90" containerName="nova-scheduler-scheduler" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.962666 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="a98d10f3-fed7-4a8a-9958-883d5da83f90" containerName="nova-scheduler-scheduler" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.968296 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="a98d10f3-fed7-4a8a-9958-883d5da83f90" containerName="nova-scheduler-scheduler" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.970577 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.971110 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 27 16:49:53 crc kubenswrapper[4751]: I0227 16:49:53.972857 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Feb 27 16:49:54 crc kubenswrapper[4751]: I0227 16:49:54.107690 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7555b92d-c801-4da2-8d2e-78fa39c892d2-config-data\") pod \"nova-scheduler-0\" (UID: \"7555b92d-c801-4da2-8d2e-78fa39c892d2\") " pod="openstack/nova-scheduler-0" Feb 27 16:49:54 crc kubenswrapper[4751]: I0227 16:49:54.107781 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjbgd\" (UniqueName: \"kubernetes.io/projected/7555b92d-c801-4da2-8d2e-78fa39c892d2-kube-api-access-bjbgd\") pod \"nova-scheduler-0\" (UID: \"7555b92d-c801-4da2-8d2e-78fa39c892d2\") " pod="openstack/nova-scheduler-0" Feb 27 16:49:54 crc kubenswrapper[4751]: I0227 16:49:54.107806 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7555b92d-c801-4da2-8d2e-78fa39c892d2-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7555b92d-c801-4da2-8d2e-78fa39c892d2\") " pod="openstack/nova-scheduler-0" Feb 27 16:49:54 crc kubenswrapper[4751]: I0227 16:49:54.210129 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7555b92d-c801-4da2-8d2e-78fa39c892d2-config-data\") pod \"nova-scheduler-0\" (UID: \"7555b92d-c801-4da2-8d2e-78fa39c892d2\") " pod="openstack/nova-scheduler-0" Feb 27 16:49:54 crc kubenswrapper[4751]: I0227 16:49:54.210220 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjbgd\" (UniqueName: \"kubernetes.io/projected/7555b92d-c801-4da2-8d2e-78fa39c892d2-kube-api-access-bjbgd\") pod \"nova-scheduler-0\" (UID: \"7555b92d-c801-4da2-8d2e-78fa39c892d2\") " pod="openstack/nova-scheduler-0" Feb 27 16:49:54 crc kubenswrapper[4751]: I0227 16:49:54.210243 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7555b92d-c801-4da2-8d2e-78fa39c892d2-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7555b92d-c801-4da2-8d2e-78fa39c892d2\") " pod="openstack/nova-scheduler-0" Feb 27 16:49:54 crc kubenswrapper[4751]: I0227 16:49:54.215117 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7555b92d-c801-4da2-8d2e-78fa39c892d2-config-data\") pod \"nova-scheduler-0\" (UID: \"7555b92d-c801-4da2-8d2e-78fa39c892d2\") " pod="openstack/nova-scheduler-0" Feb 27 16:49:54 crc kubenswrapper[4751]: I0227 16:49:54.215906 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7555b92d-c801-4da2-8d2e-78fa39c892d2-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7555b92d-c801-4da2-8d2e-78fa39c892d2\") " pod="openstack/nova-scheduler-0" Feb 27 16:49:54 crc kubenswrapper[4751]: I0227 16:49:54.232932 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjbgd\" (UniqueName: 
\"kubernetes.io/projected/7555b92d-c801-4da2-8d2e-78fa39c892d2-kube-api-access-bjbgd\") pod \"nova-scheduler-0\" (UID: \"7555b92d-c801-4da2-8d2e-78fa39c892d2\") " pod="openstack/nova-scheduler-0" Feb 27 16:49:54 crc kubenswrapper[4751]: I0227 16:49:54.290878 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 27 16:49:54 crc kubenswrapper[4751]: I0227 16:49:54.568605 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a98d10f3-fed7-4a8a-9958-883d5da83f90" path="/var/lib/kubelet/pods/a98d10f3-fed7-4a8a-9958-883d5da83f90/volumes" Feb 27 16:49:54 crc kubenswrapper[4751]: I0227 16:49:54.569547 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f991a099-8443-46ee-beec-31c2ea772e32" path="/var/lib/kubelet/pods/f991a099-8443-46ee-beec-31c2ea772e32/volumes" Feb 27 16:49:54 crc kubenswrapper[4751]: I0227 16:49:54.849634 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 27 16:49:54 crc kubenswrapper[4751]: I0227 16:49:54.914089 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4785321-8f3e-44cb-833c-0b78bc368cd9","Type":"ContainerStarted","Data":"c4a7175c059cf3518ae6eba6d361fbebbc8c52020d2692d9f04fb59309e9cac4"} Feb 27 16:49:54 crc kubenswrapper[4751]: I0227 16:49:54.914152 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 27 16:49:54 crc kubenswrapper[4751]: I0227 16:49:54.916111 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7555b92d-c801-4da2-8d2e-78fa39c892d2","Type":"ContainerStarted","Data":"511808db6495adc566dd8f0439256b3b7a63c64b047ec4bb690f7e87c6840453"} Feb 27 16:49:54 crc kubenswrapper[4751]: I0227 16:49:54.919224 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa","Type":"ContainerStarted","Data":"a91412d1338cfce1b6aed60bd52a679afa01513653b047734294840a9a916ff5"} Feb 27 16:49:54 crc kubenswrapper[4751]: I0227 16:49:54.919246 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa","Type":"ContainerStarted","Data":"7093fc3fe4d41f6bca93c56cdb9de8d375834320491b81f1d7f637c564ea6641"} Feb 27 16:49:54 crc kubenswrapper[4751]: I0227 16:49:54.951273 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.215064632 podStartE2EDuration="5.951255794s" podCreationTimestamp="2026-02-27 16:49:49 +0000 UTC" firstStartedPulling="2026-02-27 16:49:50.289458174 +0000 UTC m=+1552.436472611" lastFinishedPulling="2026-02-27 16:49:54.025649326 +0000 UTC m=+1556.172663773" observedRunningTime="2026-02-27 16:49:54.941671569 +0000 UTC m=+1557.088686036" watchObservedRunningTime="2026-02-27 16:49:54.951255794 +0000 UTC m=+1557.098270241" Feb 27 16:49:54 crc kubenswrapper[4751]: I0227 16:49:54.966470 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.966453179 podStartE2EDuration="2.966453179s" podCreationTimestamp="2026-02-27 16:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:49:54.96048063 +0000 UTC m=+1557.107495077" watchObservedRunningTime="2026-02-27 16:49:54.966453179 +0000 UTC m=+1557.113467626" Feb 27 16:49:55 
crc kubenswrapper[4751]: I0227 16:49:55.230829 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="3634d5e3-a464-4b1b-91ef-bbe63f530d48" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.201:8775/\": read tcp 10.217.0.2:45032->10.217.0.201:8775: read: connection reset by peer" Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.231139 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="3634d5e3-a464-4b1b-91ef-bbe63f530d48" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.201:8775/\": read tcp 10.217.0.2:45038->10.217.0.201:8775: read: connection reset by peer" Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.688917 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.843891 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/3634d5e3-a464-4b1b-91ef-bbe63f530d48-nova-metadata-tls-certs\") pod \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\" (UID: \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\") " Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.843941 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3634d5e3-a464-4b1b-91ef-bbe63f530d48-config-data\") pod \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\" (UID: \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\") " Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.844079 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cbwjf\" (UniqueName: \"kubernetes.io/projected/3634d5e3-a464-4b1b-91ef-bbe63f530d48-kube-api-access-cbwjf\") pod \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\" (UID: \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\") " Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.844162 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3634d5e3-a464-4b1b-91ef-bbe63f530d48-combined-ca-bundle\") pod \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\" (UID: \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\") " Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.845228 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3634d5e3-a464-4b1b-91ef-bbe63f530d48-logs\") pod \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\" (UID: \"3634d5e3-a464-4b1b-91ef-bbe63f530d48\") " Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.846008 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3634d5e3-a464-4b1b-91ef-bbe63f530d48-logs" (OuterVolumeSpecName: "logs") pod "3634d5e3-a464-4b1b-91ef-bbe63f530d48" (UID: "3634d5e3-a464-4b1b-91ef-bbe63f530d48"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.864007 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3634d5e3-a464-4b1b-91ef-bbe63f530d48-kube-api-access-cbwjf" (OuterVolumeSpecName: "kube-api-access-cbwjf") pod "3634d5e3-a464-4b1b-91ef-bbe63f530d48" (UID: "3634d5e3-a464-4b1b-91ef-bbe63f530d48"). InnerVolumeSpecName "kube-api-access-cbwjf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.880294 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3634d5e3-a464-4b1b-91ef-bbe63f530d48-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3634d5e3-a464-4b1b-91ef-bbe63f530d48" (UID: "3634d5e3-a464-4b1b-91ef-bbe63f530d48"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.902407 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3634d5e3-a464-4b1b-91ef-bbe63f530d48-config-data" (OuterVolumeSpecName: "config-data") pod "3634d5e3-a464-4b1b-91ef-bbe63f530d48" (UID: "3634d5e3-a464-4b1b-91ef-bbe63f530d48"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.936110 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3634d5e3-a464-4b1b-91ef-bbe63f530d48-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "3634d5e3-a464-4b1b-91ef-bbe63f530d48" (UID: "3634d5e3-a464-4b1b-91ef-bbe63f530d48"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.936387 4751 generic.go:334] "Generic (PLEG): container finished" podID="3634d5e3-a464-4b1b-91ef-bbe63f530d48" containerID="57e845be3c5f09f94c9bf0015590d42555fe5d4051a56aa33f23bb8c7e32c4c4" exitCode=0 Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.936455 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3634d5e3-a464-4b1b-91ef-bbe63f530d48","Type":"ContainerDied","Data":"57e845be3c5f09f94c9bf0015590d42555fe5d4051a56aa33f23bb8c7e32c4c4"} Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.936521 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3634d5e3-a464-4b1b-91ef-bbe63f530d48","Type":"ContainerDied","Data":"ca2597601876ca7cf927f6824ac88939bca51a6155c6394ea89355718643f302"} Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.936483 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.936553 4751 scope.go:117] "RemoveContainer" containerID="57e845be3c5f09f94c9bf0015590d42555fe5d4051a56aa33f23bb8c7e32c4c4" Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.940222 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7555b92d-c801-4da2-8d2e-78fa39c892d2","Type":"ContainerStarted","Data":"93a39ee11e77c8d1f29d5bc7f4d914ab63eb4c519c60efab540473556ec26c65"} Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.947967 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cbwjf\" (UniqueName: \"kubernetes.io/projected/3634d5e3-a464-4b1b-91ef-bbe63f530d48-kube-api-access-cbwjf\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.948001 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3634d5e3-a464-4b1b-91ef-bbe63f530d48-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.948015 4751 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3634d5e3-a464-4b1b-91ef-bbe63f530d48-logs\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.948031 4751 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/3634d5e3-a464-4b1b-91ef-bbe63f530d48-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.948044 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3634d5e3-a464-4b1b-91ef-bbe63f530d48-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.968277 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.968254426 podStartE2EDuration="2.968254426s" podCreationTimestamp="2026-02-27 16:49:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:49:55.959974175 +0000 UTC m=+1558.106988622" watchObservedRunningTime="2026-02-27 16:49:55.968254426 +0000 UTC m=+1558.115268873" Feb 27 16:49:55 crc kubenswrapper[4751]: I0227 16:49:55.968843 4751 scope.go:117] "RemoveContainer" containerID="6a4f2712c616d4410a3a9cead957bdcd07c42628edcfe9b34a4767d6b46ec9f8" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.009302 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.013180 4751 scope.go:117] "RemoveContainer" containerID="57e845be3c5f09f94c9bf0015590d42555fe5d4051a56aa33f23bb8c7e32c4c4" Feb 27 16:49:56 crc kubenswrapper[4751]: E0227 16:49:56.017620 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57e845be3c5f09f94c9bf0015590d42555fe5d4051a56aa33f23bb8c7e32c4c4\": container with ID starting with 57e845be3c5f09f94c9bf0015590d42555fe5d4051a56aa33f23bb8c7e32c4c4 not found: ID does not exist" containerID="57e845be3c5f09f94c9bf0015590d42555fe5d4051a56aa33f23bb8c7e32c4c4" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.017682 4751 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"57e845be3c5f09f94c9bf0015590d42555fe5d4051a56aa33f23bb8c7e32c4c4"} err="failed to get container status \"57e845be3c5f09f94c9bf0015590d42555fe5d4051a56aa33f23bb8c7e32c4c4\": rpc error: code = NotFound desc = could not find container \"57e845be3c5f09f94c9bf0015590d42555fe5d4051a56aa33f23bb8c7e32c4c4\": container with ID starting with 57e845be3c5f09f94c9bf0015590d42555fe5d4051a56aa33f23bb8c7e32c4c4 not found: ID does not exist" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.017719 4751 scope.go:117] "RemoveContainer" containerID="6a4f2712c616d4410a3a9cead957bdcd07c42628edcfe9b34a4767d6b46ec9f8" Feb 27 16:49:56 crc kubenswrapper[4751]: E0227 16:49:56.025275 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a4f2712c616d4410a3a9cead957bdcd07c42628edcfe9b34a4767d6b46ec9f8\": container with ID starting with 6a4f2712c616d4410a3a9cead957bdcd07c42628edcfe9b34a4767d6b46ec9f8 not found: ID does not exist" containerID="6a4f2712c616d4410a3a9cead957bdcd07c42628edcfe9b34a4767d6b46ec9f8" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.025333 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a4f2712c616d4410a3a9cead957bdcd07c42628edcfe9b34a4767d6b46ec9f8"} err="failed to get container status \"6a4f2712c616d4410a3a9cead957bdcd07c42628edcfe9b34a4767d6b46ec9f8\": rpc error: code = NotFound desc = could not find container \"6a4f2712c616d4410a3a9cead957bdcd07c42628edcfe9b34a4767d6b46ec9f8\": container with ID starting with 6a4f2712c616d4410a3a9cead957bdcd07c42628edcfe9b34a4767d6b46ec9f8 not found: ID does not exist" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.037744 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.048224 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:49:56 crc kubenswrapper[4751]: E0227 16:49:56.048755 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3634d5e3-a464-4b1b-91ef-bbe63f530d48" containerName="nova-metadata-metadata" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.048778 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="3634d5e3-a464-4b1b-91ef-bbe63f530d48" containerName="nova-metadata-metadata" Feb 27 16:49:56 crc kubenswrapper[4751]: E0227 16:49:56.048822 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3634d5e3-a464-4b1b-91ef-bbe63f530d48" containerName="nova-metadata-log" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.048830 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="3634d5e3-a464-4b1b-91ef-bbe63f530d48" containerName="nova-metadata-log" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.049020 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="3634d5e3-a464-4b1b-91ef-bbe63f530d48" containerName="nova-metadata-metadata" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.049046 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="3634d5e3-a464-4b1b-91ef-bbe63f530d48" containerName="nova-metadata-log" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.050155 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.054259 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.060001 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.060807 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.151653 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-logs\") pod \"nova-metadata-0\" (UID: \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\") " pod="openstack/nova-metadata-0" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.151764 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\") " pod="openstack/nova-metadata-0" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.151793 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42kg7\" (UniqueName: \"kubernetes.io/projected/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-kube-api-access-42kg7\") pod \"nova-metadata-0\" (UID: \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\") " pod="openstack/nova-metadata-0" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.151820 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\") " pod="openstack/nova-metadata-0" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.151876 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-config-data\") pod \"nova-metadata-0\" (UID: \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\") " pod="openstack/nova-metadata-0" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.253257 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-config-data\") pod \"nova-metadata-0\" (UID: \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\") " pod="openstack/nova-metadata-0" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.253994 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-logs\") pod \"nova-metadata-0\" (UID: \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\") " pod="openstack/nova-metadata-0" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.254168 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\") " pod="openstack/nova-metadata-0" Feb 27 16:49:56 crc 
kubenswrapper[4751]: I0227 16:49:56.254282 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42kg7\" (UniqueName: \"kubernetes.io/projected/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-kube-api-access-42kg7\") pod \"nova-metadata-0\" (UID: \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\") " pod="openstack/nova-metadata-0" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.254371 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\") " pod="openstack/nova-metadata-0" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.254508 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-logs\") pod \"nova-metadata-0\" (UID: \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\") " pod="openstack/nova-metadata-0" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.258244 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-config-data\") pod \"nova-metadata-0\" (UID: \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\") " pod="openstack/nova-metadata-0" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.258915 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\") " pod="openstack/nova-metadata-0" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.262610 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\") " pod="openstack/nova-metadata-0" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.272725 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-42kg7\" (UniqueName: \"kubernetes.io/projected/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-kube-api-access-42kg7\") pod \"nova-metadata-0\" (UID: \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\") " pod="openstack/nova-metadata-0" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.388177 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.544991 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3634d5e3-a464-4b1b-91ef-bbe63f530d48" path="/var/lib/kubelet/pods/3634d5e3-a464-4b1b-91ef-bbe63f530d48/volumes" Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.863473 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:49:56 crc kubenswrapper[4751]: W0227 16:49:56.865931 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod68bcf8b3_a271_47f0_9815_17cd3fdaec3e.slice/crio-067057f4d2b8204c4f197162308579313fb901d7226b09612ed331c69316ab6f WatchSource:0}: Error finding container 067057f4d2b8204c4f197162308579313fb901d7226b09612ed331c69316ab6f: Status 404 returned error can't find the container with id 067057f4d2b8204c4f197162308579313fb901d7226b09612ed331c69316ab6f Feb 27 16:49:56 crc kubenswrapper[4751]: I0227 16:49:56.959337 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"68bcf8b3-a271-47f0-9815-17cd3fdaec3e","Type":"ContainerStarted","Data":"067057f4d2b8204c4f197162308579313fb901d7226b09612ed331c69316ab6f"} Feb 27 16:49:57 crc kubenswrapper[4751]: I0227 16:49:57.977015 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"68bcf8b3-a271-47f0-9815-17cd3fdaec3e","Type":"ContainerStarted","Data":"95dbd9481ab5bf19368b4e9a5596159862e13485f791a125ad43cdef90030854"} Feb 27 16:49:57 crc kubenswrapper[4751]: I0227 16:49:57.978437 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"68bcf8b3-a271-47f0-9815-17cd3fdaec3e","Type":"ContainerStarted","Data":"efdfb1b8b12751d73f4a5fcda2b86dfb5e8bc636338103308612ebd200159f20"} Feb 27 16:49:58 crc kubenswrapper[4751]: I0227 16:49:58.009031 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.009015038 podStartE2EDuration="3.009015038s" podCreationTimestamp="2026-02-27 16:49:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 16:49:58.00722029 +0000 UTC m=+1560.154234777" watchObservedRunningTime="2026-02-27 16:49:58.009015038 +0000 UTC m=+1560.156029485" Feb 27 16:49:58 crc kubenswrapper[4751]: I0227 16:49:58.918012 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 16:49:58 crc kubenswrapper[4751]: I0227 16:49:58.918363 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:49:58 crc kubenswrapper[4751]: I0227 16:49:58.918453 4751 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 16:49:58 crc kubenswrapper[4751]: I0227 16:49:58.919780 4751 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"62d29c4bd042871716a930e4ba973dc2a54787adada169c002b5efb7ee6d0c17"} pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 27 16:49:58 crc kubenswrapper[4751]: I0227 16:49:58.919881 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" containerID="cri-o://62d29c4bd042871716a930e4ba973dc2a54787adada169c002b5efb7ee6d0c17" gracePeriod=600 Feb 27 16:49:59 crc kubenswrapper[4751]: I0227 16:49:59.291470 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Feb 27 16:50:00 crc kubenswrapper[4751]: I0227 16:50:00.000315 4751 generic.go:334] "Generic (PLEG): container finished" podID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerID="62d29c4bd042871716a930e4ba973dc2a54787adada169c002b5efb7ee6d0c17" exitCode=0 Feb 27 16:50:00 crc kubenswrapper[4751]: I0227 16:50:00.000357 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerDied","Data":"62d29c4bd042871716a930e4ba973dc2a54787adada169c002b5efb7ee6d0c17"} Feb 27 16:50:00 crc kubenswrapper[4751]: I0227 16:50:00.000387 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerStarted","Data":"1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0"} Feb 27 16:50:00 crc kubenswrapper[4751]: I0227 16:50:00.000405 4751 scope.go:117] "RemoveContainer" containerID="ef7e9a78c9c006f209ebb578b8c3e17b655897835e4a3ab4f6e482b486441566" Feb 27 16:50:00 crc kubenswrapper[4751]: I0227 16:50:00.154292 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536850-n6f85"] Feb 27 16:50:00 crc kubenswrapper[4751]: I0227 16:50:00.157258 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536850-n6f85" Feb 27 16:50:00 crc kubenswrapper[4751]: I0227 16:50:00.162822 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 16:50:00 crc kubenswrapper[4751]: I0227 16:50:00.162862 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 16:50:00 crc kubenswrapper[4751]: I0227 16:50:00.162824 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 16:50:00 crc kubenswrapper[4751]: I0227 16:50:00.164795 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536850-n6f85"] Feb 27 16:50:00 crc kubenswrapper[4751]: I0227 16:50:00.245174 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ds2x4\" (UniqueName: \"kubernetes.io/projected/a542e28d-0700-487f-be1c-2d2c982a03f5-kube-api-access-ds2x4\") pod \"auto-csr-approver-29536850-n6f85\" (UID: \"a542e28d-0700-487f-be1c-2d2c982a03f5\") " pod="openshift-infra/auto-csr-approver-29536850-n6f85" Feb 27 16:50:00 crc kubenswrapper[4751]: I0227 16:50:00.346427 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ds2x4\" (UniqueName: \"kubernetes.io/projected/a542e28d-0700-487f-be1c-2d2c982a03f5-kube-api-access-ds2x4\") pod \"auto-csr-approver-29536850-n6f85\" (UID: \"a542e28d-0700-487f-be1c-2d2c982a03f5\") " pod="openshift-infra/auto-csr-approver-29536850-n6f85" Feb 27 16:50:00 crc kubenswrapper[4751]: I0227 16:50:00.365243 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ds2x4\" (UniqueName: \"kubernetes.io/projected/a542e28d-0700-487f-be1c-2d2c982a03f5-kube-api-access-ds2x4\") pod \"auto-csr-approver-29536850-n6f85\" (UID: \"a542e28d-0700-487f-be1c-2d2c982a03f5\") " pod="openshift-infra/auto-csr-approver-29536850-n6f85" Feb 27 16:50:00 crc kubenswrapper[4751]: I0227 16:50:00.483135 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536850-n6f85" Feb 27 16:50:00 crc kubenswrapper[4751]: I0227 16:50:00.948238 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536850-n6f85"] Feb 27 16:50:01 crc kubenswrapper[4751]: I0227 16:50:01.016051 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536850-n6f85" event={"ID":"a542e28d-0700-487f-be1c-2d2c982a03f5","Type":"ContainerStarted","Data":"1b13927709671142edf69267fe0a802112c5ea0a152fee2090397267acc47ed3"} Feb 27 16:50:01 crc kubenswrapper[4751]: I0227 16:50:01.388920 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Feb 27 16:50:01 crc kubenswrapper[4751]: I0227 16:50:01.388999 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Feb 27 16:50:03 crc kubenswrapper[4751]: I0227 16:50:03.044202 4751 generic.go:334] "Generic (PLEG): container finished" podID="a542e28d-0700-487f-be1c-2d2c982a03f5" containerID="8ce409aa780846399cf6e0d34303969ec916d50693012fdf4ddd01e09ef96144" exitCode=0 Feb 27 16:50:03 crc kubenswrapper[4751]: I0227 16:50:03.044320 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536850-n6f85" event={"ID":"a542e28d-0700-487f-be1c-2d2c982a03f5","Type":"ContainerDied","Data":"8ce409aa780846399cf6e0d34303969ec916d50693012fdf4ddd01e09ef96144"} Feb 27 16:50:03 crc kubenswrapper[4751]: I0227 16:50:03.284642 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 27 16:50:03 crc kubenswrapper[4751]: I0227 16:50:03.284714 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 27 16:50:04 crc kubenswrapper[4751]: I0227 16:50:04.291653 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Feb 27 16:50:04 crc kubenswrapper[4751]: I0227 16:50:04.300658 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="1a8f14c4-f8bc-4247-b2a2-72aa4801adfa" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.212:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 27 16:50:04 crc kubenswrapper[4751]: I0227 16:50:04.300696 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="1a8f14c4-f8bc-4247-b2a2-72aa4801adfa" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.212:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 27 16:50:04 crc kubenswrapper[4751]: I0227 16:50:04.338351 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Feb 27 16:50:04 crc kubenswrapper[4751]: I0227 16:50:04.506390 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536850-n6f85" Feb 27 16:50:04 crc kubenswrapper[4751]: I0227 16:50:04.634915 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ds2x4\" (UniqueName: \"kubernetes.io/projected/a542e28d-0700-487f-be1c-2d2c982a03f5-kube-api-access-ds2x4\") pod \"a542e28d-0700-487f-be1c-2d2c982a03f5\" (UID: \"a542e28d-0700-487f-be1c-2d2c982a03f5\") " Feb 27 16:50:04 crc kubenswrapper[4751]: I0227 16:50:04.653886 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a542e28d-0700-487f-be1c-2d2c982a03f5-kube-api-access-ds2x4" (OuterVolumeSpecName: "kube-api-access-ds2x4") pod "a542e28d-0700-487f-be1c-2d2c982a03f5" (UID: "a542e28d-0700-487f-be1c-2d2c982a03f5"). InnerVolumeSpecName "kube-api-access-ds2x4". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:04 crc kubenswrapper[4751]: I0227 16:50:04.737196 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ds2x4\" (UniqueName: \"kubernetes.io/projected/a542e28d-0700-487f-be1c-2d2c982a03f5-kube-api-access-ds2x4\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:05 crc kubenswrapper[4751]: I0227 16:50:05.074634 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536850-n6f85" Feb 27 16:50:05 crc kubenswrapper[4751]: I0227 16:50:05.087673 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536850-n6f85" event={"ID":"a542e28d-0700-487f-be1c-2d2c982a03f5","Type":"ContainerDied","Data":"1b13927709671142edf69267fe0a802112c5ea0a152fee2090397267acc47ed3"} Feb 27 16:50:05 crc kubenswrapper[4751]: I0227 16:50:05.087733 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1b13927709671142edf69267fe0a802112c5ea0a152fee2090397267acc47ed3" Feb 27 16:50:05 crc kubenswrapper[4751]: I0227 16:50:05.129966 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Feb 27 16:50:05 crc kubenswrapper[4751]: I0227 16:50:05.599499 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536844-qtl4v"] Feb 27 16:50:05 crc kubenswrapper[4751]: I0227 16:50:05.612734 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536844-qtl4v"] Feb 27 16:50:06 crc kubenswrapper[4751]: I0227 16:50:06.388871 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Feb 27 16:50:06 crc kubenswrapper[4751]: I0227 16:50:06.389252 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Feb 27 16:50:06 crc kubenswrapper[4751]: I0227 16:50:06.538662 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78c6e077-9027-4c5f-a858-6b0b3328682a" path="/var/lib/kubelet/pods/78c6e077-9027-4c5f-a858-6b0b3328682a/volumes" Feb 27 16:50:07 crc kubenswrapper[4751]: I0227 16:50:07.405640 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="68bcf8b3-a271-47f0-9815-17cd3fdaec3e" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.214:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 27 16:50:07 crc kubenswrapper[4751]: I0227 16:50:07.405638 4751 prober.go:107] "Probe failed" probeType="Startup" 
pod="openstack/nova-metadata-0" podUID="68bcf8b3-a271-47f0-9815-17cd3fdaec3e" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.214:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 27 16:50:13 crc kubenswrapper[4751]: I0227 16:50:13.297002 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Feb 27 16:50:13 crc kubenswrapper[4751]: I0227 16:50:13.298565 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Feb 27 16:50:13 crc kubenswrapper[4751]: I0227 16:50:13.300023 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Feb 27 16:50:13 crc kubenswrapper[4751]: I0227 16:50:13.307431 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Feb 27 16:50:14 crc kubenswrapper[4751]: I0227 16:50:14.182799 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Feb 27 16:50:14 crc kubenswrapper[4751]: I0227 16:50:14.191249 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Feb 27 16:50:16 crc kubenswrapper[4751]: I0227 16:50:16.397739 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Feb 27 16:50:16 crc kubenswrapper[4751]: I0227 16:50:16.398279 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Feb 27 16:50:16 crc kubenswrapper[4751]: I0227 16:50:16.407359 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Feb 27 16:50:16 crc kubenswrapper[4751]: I0227 16:50:16.408009 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Feb 27 16:50:19 crc kubenswrapper[4751]: I0227 16:50:19.838593 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Feb 27 16:50:41 crc kubenswrapper[4751]: I0227 16:50:41.649328 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Feb 27 16:50:41 crc kubenswrapper[4751]: I0227 16:50:41.650039 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="176ca33f-0a66-4132-bdf1-4be84eba5b34" containerName="openstackclient" containerID="cri-o://92998f2cdf2e9c4313aa0a3ab7697b4d34436e6d3fd22cb366615eb354f3f91a" gracePeriod=2 Feb 27 16:50:41 crc kubenswrapper[4751]: I0227 16:50:41.666931 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Feb 27 16:50:41 crc kubenswrapper[4751]: I0227 16:50:41.762580 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-hvcjf"] Feb 27 16:50:41 crc kubenswrapper[4751]: E0227 16:50:41.763019 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a542e28d-0700-487f-be1c-2d2c982a03f5" containerName="oc" Feb 27 16:50:41 crc kubenswrapper[4751]: I0227 16:50:41.763040 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="a542e28d-0700-487f-be1c-2d2c982a03f5" containerName="oc" Feb 27 16:50:41 crc kubenswrapper[4751]: E0227 16:50:41.763093 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="176ca33f-0a66-4132-bdf1-4be84eba5b34" containerName="openstackclient" Feb 27 16:50:41 crc kubenswrapper[4751]: I0227 16:50:41.763100 4751 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="176ca33f-0a66-4132-bdf1-4be84eba5b34" containerName="openstackclient" Feb 27 16:50:41 crc kubenswrapper[4751]: I0227 16:50:41.763520 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="176ca33f-0a66-4132-bdf1-4be84eba5b34" containerName="openstackclient" Feb 27 16:50:41 crc kubenswrapper[4751]: I0227 16:50:41.763539 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="a542e28d-0700-487f-be1c-2d2c982a03f5" containerName="oc" Feb 27 16:50:41 crc kubenswrapper[4751]: I0227 16:50:41.764276 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-hvcjf" Feb 27 16:50:41 crc kubenswrapper[4751]: I0227 16:50:41.772010 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Feb 27 16:50:41 crc kubenswrapper[4751]: I0227 16:50:41.775618 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-543a-account-create-update-mxr7p"] Feb 27 16:50:41 crc kubenswrapper[4751]: I0227 16:50:41.776844 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-543a-account-create-update-mxr7p" Feb 27 16:50:41 crc kubenswrapper[4751]: I0227 16:50:41.778780 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Feb 27 16:50:41 crc kubenswrapper[4751]: I0227 16:50:41.790639 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-hvcjf"] Feb 27 16:50:41 crc kubenswrapper[4751]: I0227 16:50:41.812600 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-543a-account-create-update-mxr7p"] Feb 27 16:50:41 crc kubenswrapper[4751]: I0227 16:50:41.850171 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 27 16:50:41 crc kubenswrapper[4751]: I0227 16:50:41.902364 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-8q4zm"] Feb 27 16:50:41 crc kubenswrapper[4751]: I0227 16:50:41.923082 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-8q4zm"] Feb 27 16:50:41 crc kubenswrapper[4751]: I0227 16:50:41.941057 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7dc9beed-8444-4389-8859-234af0090157-operator-scripts\") pod \"root-account-create-update-hvcjf\" (UID: \"7dc9beed-8444-4389-8859-234af0090157\") " pod="openstack/root-account-create-update-hvcjf" Feb 27 16:50:41 crc kubenswrapper[4751]: I0227 16:50:41.941201 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftfpv\" (UniqueName: \"kubernetes.io/projected/7dc9beed-8444-4389-8859-234af0090157-kube-api-access-ftfpv\") pod \"root-account-create-update-hvcjf\" (UID: \"7dc9beed-8444-4389-8859-234af0090157\") " pod="openstack/root-account-create-update-hvcjf" Feb 27 16:50:41 crc kubenswrapper[4751]: I0227 16:50:41.941260 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c4060a4-e264-4a4a-90ea-4a270cc50940-operator-scripts\") pod \"glance-543a-account-create-update-mxr7p\" (UID: \"6c4060a4-e264-4a4a-90ea-4a270cc50940\") " pod="openstack/glance-543a-account-create-update-mxr7p" Feb 27 16:50:41 crc kubenswrapper[4751]: 
I0227 16:50:41.941285 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmcxg\" (UniqueName: \"kubernetes.io/projected/6c4060a4-e264-4a4a-90ea-4a270cc50940-kube-api-access-gmcxg\") pod \"glance-543a-account-create-update-mxr7p\" (UID: \"6c4060a4-e264-4a4a-90ea-4a270cc50940\") " pod="openstack/glance-543a-account-create-update-mxr7p" Feb 27 16:50:41 crc kubenswrapper[4751]: I0227 16:50:41.977534 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Feb 27 16:50:41 crc kubenswrapper[4751]: I0227 16:50:41.978219 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="438372fd-dcc8-47e3-a547-c8a1729b2f1f" containerName="openstack-network-exporter" containerID="cri-o://6c74828590d469e03165fd6f252422867fd14e665dbe3ddfb7a6c1b2f1561bb7" gracePeriod=300 Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.027718 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-fb4c-account-create-update-7hrjf"] Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.029205 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-fb4c-account-create-update-7hrjf" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.050122 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c4060a4-e264-4a4a-90ea-4a270cc50940-operator-scripts\") pod \"glance-543a-account-create-update-mxr7p\" (UID: \"6c4060a4-e264-4a4a-90ea-4a270cc50940\") " pod="openstack/glance-543a-account-create-update-mxr7p" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.050200 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmcxg\" (UniqueName: \"kubernetes.io/projected/6c4060a4-e264-4a4a-90ea-4a270cc50940-kube-api-access-gmcxg\") pod \"glance-543a-account-create-update-mxr7p\" (UID: \"6c4060a4-e264-4a4a-90ea-4a270cc50940\") " pod="openstack/glance-543a-account-create-update-mxr7p" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.050281 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7dc9beed-8444-4389-8859-234af0090157-operator-scripts\") pod \"root-account-create-update-hvcjf\" (UID: \"7dc9beed-8444-4389-8859-234af0090157\") " pod="openstack/root-account-create-update-hvcjf" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.050444 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftfpv\" (UniqueName: \"kubernetes.io/projected/7dc9beed-8444-4389-8859-234af0090157-kube-api-access-ftfpv\") pod \"root-account-create-update-hvcjf\" (UID: \"7dc9beed-8444-4389-8859-234af0090157\") " pod="openstack/root-account-create-update-hvcjf" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.050938 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Feb 27 16:50:42 crc kubenswrapper[4751]: E0227 16:50:42.051256 4751 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Feb 27 16:50:42 crc kubenswrapper[4751]: E0227 16:50:42.051310 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-config-data podName:cecf602c-dec2-40c6-922c-bf84b707b1b9 nodeName:}" 
failed. No retries permitted until 2026-02-27 16:50:42.551285148 +0000 UTC m=+1604.698299595 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-config-data") pod "rabbitmq-cell1-server-0" (UID: "cecf602c-dec2-40c6-922c-bf84b707b1b9") : configmap "rabbitmq-cell1-config-data" not found Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.052940 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7dc9beed-8444-4389-8859-234af0090157-operator-scripts\") pod \"root-account-create-update-hvcjf\" (UID: \"7dc9beed-8444-4389-8859-234af0090157\") " pod="openstack/root-account-create-update-hvcjf" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.055598 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c4060a4-e264-4a4a-90ea-4a270cc50940-operator-scripts\") pod \"glance-543a-account-create-update-mxr7p\" (UID: \"6c4060a4-e264-4a4a-90ea-4a270cc50940\") " pod="openstack/glance-543a-account-create-update-mxr7p" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.093991 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmcxg\" (UniqueName: \"kubernetes.io/projected/6c4060a4-e264-4a4a-90ea-4a270cc50940-kube-api-access-gmcxg\") pod \"glance-543a-account-create-update-mxr7p\" (UID: \"6c4060a4-e264-4a4a-90ea-4a270cc50940\") " pod="openstack/glance-543a-account-create-update-mxr7p" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.099284 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-543a-account-create-update-mxr7p" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.111623 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftfpv\" (UniqueName: \"kubernetes.io/projected/7dc9beed-8444-4389-8859-234af0090157-kube-api-access-ftfpv\") pod \"root-account-create-update-hvcjf\" (UID: \"7dc9beed-8444-4389-8859-234af0090157\") " pod="openstack/root-account-create-update-hvcjf" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.116462 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-fb4c-account-create-update-7hrjf"] Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.155157 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d7fb2588-cb2f-4495-ab6d-4f6aef939caf-operator-scripts\") pod \"neutron-fb4c-account-create-update-7hrjf\" (UID: \"d7fb2588-cb2f-4495-ab6d-4f6aef939caf\") " pod="openstack/neutron-fb4c-account-create-update-7hrjf" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.155249 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzc98\" (UniqueName: \"kubernetes.io/projected/d7fb2588-cb2f-4495-ab6d-4f6aef939caf-kube-api-access-nzc98\") pod \"neutron-fb4c-account-create-update-7hrjf\" (UID: \"d7fb2588-cb2f-4495-ab6d-4f6aef939caf\") " pod="openstack/neutron-fb4c-account-create-update-7hrjf" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.255008 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-543a-account-create-update-5lwvv"] Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.256438 4751 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d7fb2588-cb2f-4495-ab6d-4f6aef939caf-operator-scripts\") pod \"neutron-fb4c-account-create-update-7hrjf\" (UID: \"d7fb2588-cb2f-4495-ab6d-4f6aef939caf\") " pod="openstack/neutron-fb4c-account-create-update-7hrjf" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.256527 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzc98\" (UniqueName: \"kubernetes.io/projected/d7fb2588-cb2f-4495-ab6d-4f6aef939caf-kube-api-access-nzc98\") pod \"neutron-fb4c-account-create-update-7hrjf\" (UID: \"d7fb2588-cb2f-4495-ab6d-4f6aef939caf\") " pod="openstack/neutron-fb4c-account-create-update-7hrjf" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.257532 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d7fb2588-cb2f-4495-ab6d-4f6aef939caf-operator-scripts\") pod \"neutron-fb4c-account-create-update-7hrjf\" (UID: \"d7fb2588-cb2f-4495-ab6d-4f6aef939caf\") " pod="openstack/neutron-fb4c-account-create-update-7hrjf" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.335377 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-fb4c-account-create-update-crj56"] Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.353804 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-543a-account-create-update-5lwvv"] Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.402202 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzc98\" (UniqueName: \"kubernetes.io/projected/d7fb2588-cb2f-4495-ab6d-4f6aef939caf-kube-api-access-nzc98\") pod \"neutron-fb4c-account-create-update-7hrjf\" (UID: \"d7fb2588-cb2f-4495-ab6d-4f6aef939caf\") " pod="openstack/neutron-fb4c-account-create-update-7hrjf" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.402891 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-fb4c-account-create-update-7hrjf" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.403096 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-hvcjf" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.403253 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-fb4c-account-create-update-crj56"] Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.419116 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-mvlkh"] Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.487695 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-mvlkh"] Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.588595 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ec80714-54dc-4207-9b0e-0eb76833d496" path="/var/lib/kubelet/pods/8ec80714-54dc-4207-9b0e-0eb76833d496/volumes" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.589476 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92d68198-8212-48d0-b42a-1be37cc135f2" path="/var/lib/kubelet/pods/92d68198-8212-48d0-b42a-1be37cc135f2/volumes" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.590053 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf785eb4-7c95-411e-92ae-2be6b08f4d43" path="/var/lib/kubelet/pods/cf785eb4-7c95-411e-92ae-2be6b08f4d43/volumes" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.599520 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f306ad12-0f04-4414-8393-8ab5cc63c8b5" path="/var/lib/kubelet/pods/f306ad12-0f04-4414-8393-8ab5cc63c8b5/volumes" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.600237 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-d758-account-create-update-jpvqv"] Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.601232 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-d758-account-create-update-jpvqv"] Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.601247 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.601260 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-d758-account-create-update-bdsrf"] Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.617904 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-d758-account-create-update-jpvqv" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.618283 4751 generic.go:334] "Generic (PLEG): container finished" podID="438372fd-dcc8-47e3-a547-c8a1729b2f1f" containerID="6c74828590d469e03165fd6f252422867fd14e665dbe3ddfb7a6c1b2f1561bb7" exitCode=2 Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.618326 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="95b4a6cb-a957-4a31-8510-292eb1305ad6" containerName="openstack-network-exporter" containerID="cri-o://5f0141511ca3d3aa75b1878aa729a5715a4ce124f70bcdf3e79e44f61c356a32" gracePeriod=300 Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.618385 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.618443 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"438372fd-dcc8-47e3-a547-c8a1729b2f1f","Type":"ContainerDied","Data":"6c74828590d469e03165fd6f252422867fd14e665dbe3ddfb7a6c1b2f1561bb7"} Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.619047 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="36495e7a-b8f8-4d54-a504-e92bb6211327" containerName="ovn-northd" containerID="cri-o://f077319db94e719684cff2b1abac38bddd05de9e2a8257b1d62586df2368fb1d" gracePeriod=30 Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.619126 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="36495e7a-b8f8-4d54-a504-e92bb6211327" containerName="openstack-network-exporter" containerID="cri-o://8f37b9a53b57fd59b8d193823dd9bac3b95253b3c09ec6d44395ab006d4399e8" gracePeriod=30 Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.619790 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6c58n\" (UniqueName: \"kubernetes.io/projected/b959a608-80f8-43f4-81a4-203b9a27467d-kube-api-access-6c58n\") pod \"cinder-d758-account-create-update-jpvqv\" (UID: \"b959a608-80f8-43f4-81a4-203b9a27467d\") " pod="openstack/cinder-d758-account-create-update-jpvqv" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.621162 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b959a608-80f8-43f4-81a4-203b9a27467d-operator-scripts\") pod \"cinder-d758-account-create-update-jpvqv\" (UID: \"b959a608-80f8-43f4-81a4-203b9a27467d\") " pod="openstack/cinder-d758-account-create-update-jpvqv" Feb 27 16:50:42 crc kubenswrapper[4751]: E0227 16:50:42.624025 4751 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Feb 27 16:50:42 crc kubenswrapper[4751]: E0227 16:50:42.624083 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-config-data podName:cecf602c-dec2-40c6-922c-bf84b707b1b9 nodeName:}" failed. No retries permitted until 2026-02-27 16:50:43.624067725 +0000 UTC m=+1605.771082172 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-config-data") pod "rabbitmq-cell1-server-0" (UID: "cecf602c-dec2-40c6-922c-bf84b707b1b9") : configmap "rabbitmq-cell1-config-data" not found Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.628936 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.648470 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-d758-account-create-update-bdsrf"] Feb 27 16:50:42 crc kubenswrapper[4751]: E0227 16:50:42.696750 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f077319db94e719684cff2b1abac38bddd05de9e2a8257b1d62586df2368fb1d" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Feb 27 16:50:42 crc kubenswrapper[4751]: E0227 16:50:42.701923 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f077319db94e719684cff2b1abac38bddd05de9e2a8257b1d62586df2368fb1d" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.741326 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b959a608-80f8-43f4-81a4-203b9a27467d-operator-scripts\") pod \"cinder-d758-account-create-update-jpvqv\" (UID: \"b959a608-80f8-43f4-81a4-203b9a27467d\") " pod="openstack/cinder-d758-account-create-update-jpvqv" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.741598 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6c58n\" (UniqueName: \"kubernetes.io/projected/b959a608-80f8-43f4-81a4-203b9a27467d-kube-api-access-6c58n\") pod \"cinder-d758-account-create-update-jpvqv\" (UID: \"b959a608-80f8-43f4-81a4-203b9a27467d\") " pod="openstack/cinder-d758-account-create-update-jpvqv" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.744889 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b959a608-80f8-43f4-81a4-203b9a27467d-operator-scripts\") pod \"cinder-d758-account-create-update-jpvqv\" (UID: \"b959a608-80f8-43f4-81a4-203b9a27467d\") " pod="openstack/cinder-d758-account-create-update-jpvqv" Feb 27 16:50:42 crc kubenswrapper[4751]: E0227 16:50:42.769104 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f077319db94e719684cff2b1abac38bddd05de9e2a8257b1d62586df2368fb1d" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Feb 27 16:50:42 crc kubenswrapper[4751]: E0227 16:50:42.837586 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f077319db94e719684cff2b1abac38bddd05de9e2a8257b1d62586df2368fb1d" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Feb 27 16:50:42 crc kubenswrapper[4751]: E0227 16:50:42.838426 4751 prober.go:104] "Probe errored" err="rpc 
error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Liveness" pod="openstack/ovn-northd-0" podUID="36495e7a-b8f8-4d54-a504-e92bb6211327" containerName="ovn-northd" Feb 27 16:50:42 crc kubenswrapper[4751]: E0227 16:50:42.867731 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f077319db94e719684cff2b1abac38bddd05de9e2a8257b1d62586df2368fb1d" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.898519 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6c58n\" (UniqueName: \"kubernetes.io/projected/b959a608-80f8-43f4-81a4-203b9a27467d-kube-api-access-6c58n\") pod \"cinder-d758-account-create-update-jpvqv\" (UID: \"b959a608-80f8-43f4-81a4-203b9a27467d\") " pod="openstack/cinder-d758-account-create-update-jpvqv" Feb 27 16:50:42 crc kubenswrapper[4751]: E0227 16:50:42.908528 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f077319db94e719684cff2b1abac38bddd05de9e2a8257b1d62586df2368fb1d" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Feb 27 16:50:42 crc kubenswrapper[4751]: E0227 16:50:42.908702 4751 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="36495e7a-b8f8-4d54-a504-e92bb6211327" containerName="ovn-northd" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.908860 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-vgcdl"] Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.930885 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-vgcdl"] Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.950202 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-c1fb-account-create-update-zkj9t"] Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.951427 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-c1fb-account-create-update-zkj9t" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.955325 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.959991 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="438372fd-dcc8-47e3-a547-c8a1729b2f1f" containerName="ovsdbserver-sb" containerID="cri-o://21f622d5f20191c0f88a55bea7b29298d2743cf401c56a29ef7e4407b0fd82c6" gracePeriod=300 Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.966752 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="95b4a6cb-a957-4a31-8510-292eb1305ad6" containerName="ovsdbserver-nb" containerID="cri-o://67b68970dcc70c2551fa94757bc51fc1016792d1261833505238d86e1d89cc24" gracePeriod=300 Feb 27 16:50:42 crc kubenswrapper[4751]: I0227 16:50:42.992389 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.003028 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-d758-account-create-update-jpvqv" Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.055014 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-4xmph"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.055364 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" podUID="ef097fe8-b372-4175-a5be-15fbb62905c9" containerName="dnsmasq-dns" containerID="cri-o://ff7df5badcf4d604fc8a4002a9fdc2e25bffaf7333ce4b9ec101b40ca09bbc07" gracePeriod=10 Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.076321 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v66zf\" (UniqueName: \"kubernetes.io/projected/465988d0-be74-4295-bb31-4265148803e8-kube-api-access-v66zf\") pod \"nova-api-c1fb-account-create-update-zkj9t\" (UID: \"465988d0-be74-4295-bb31-4265148803e8\") " pod="openstack/nova-api-c1fb-account-create-update-zkj9t" Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.076561 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/465988d0-be74-4295-bb31-4265148803e8-operator-scripts\") pod \"nova-api-c1fb-account-create-update-zkj9t\" (UID: \"465988d0-be74-4295-bb31-4265148803e8\") " pod="openstack/nova-api-c1fb-account-create-update-zkj9t" Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.084694 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-c1fb-account-create-update-zkj9t"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.111442 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-c1fb-account-create-update-7gk8w"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.125459 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-c1fb-account-create-update-7gk8w"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.157686 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-frvvc"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.177645 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/465988d0-be74-4295-bb31-4265148803e8-operator-scripts\") pod \"nova-api-c1fb-account-create-update-zkj9t\" (UID: \"465988d0-be74-4295-bb31-4265148803e8\") " pod="openstack/nova-api-c1fb-account-create-update-zkj9t" Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.177768 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v66zf\" (UniqueName: \"kubernetes.io/projected/465988d0-be74-4295-bb31-4265148803e8-kube-api-access-v66zf\") pod \"nova-api-c1fb-account-create-update-zkj9t\" (UID: \"465988d0-be74-4295-bb31-4265148803e8\") " pod="openstack/nova-api-c1fb-account-create-update-zkj9t" Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.179167 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/465988d0-be74-4295-bb31-4265148803e8-operator-scripts\") pod \"nova-api-c1fb-account-create-update-zkj9t\" (UID: \"465988d0-be74-4295-bb31-4265148803e8\") " pod="openstack/nova-api-c1fb-account-create-update-zkj9t" Feb 27 16:50:43 crc kubenswrapper[4751]: E0227 16:50:43.180741 4751 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Feb 27 16:50:43 crc kubenswrapper[4751]: E0227 16:50:43.180791 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-config-data podName:51a81c6a-6814-412d-b77d-e741f1f74446 nodeName:}" failed. No retries permitted until 2026-02-27 16:50:43.680776344 +0000 UTC m=+1605.827790801 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-config-data") pod "rabbitmq-server-0" (UID: "51a81c6a-6814-412d-b77d-e741f1f74446") : configmap "rabbitmq-config-data" not found Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.202038 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-5c5d5b6fdd-9d8xv"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.202250 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-5c5d5b6fdd-9d8xv" podUID="3c5e58eb-31a4-4253-8cb9-a9486bb2d955" containerName="placement-log" containerID="cri-o://fe0b48cb4c4111dfc56e9cc80355b87b652df2aa7701be61b0f630ca7e55427a" gracePeriod=30 Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.203829 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-5c5d5b6fdd-9d8xv" podUID="3c5e58eb-31a4-4253-8cb9-a9486bb2d955" containerName="placement-api" containerID="cri-o://d68a8fb32c3c122cd258ca89b0d0d8f27592db1bade310c58767879538bba0eb" gracePeriod=30 Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.220754 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v66zf\" (UniqueName: \"kubernetes.io/projected/465988d0-be74-4295-bb31-4265148803e8-kube-api-access-v66zf\") pod \"nova-api-c1fb-account-create-update-zkj9t\" (UID: \"465988d0-be74-4295-bb31-4265148803e8\") " pod="openstack/nova-api-c1fb-account-create-update-zkj9t" Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.244515 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-gdjfm"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.264876 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/ovn-controller-metrics-xcsrx"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.265050 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-metrics-xcsrx" podUID="da8c688d-4446-4f25-853d-0f694094d0af" containerName="openstack-network-exporter" containerID="cri-o://d795f9fa74378811b1eaa8d00254a0ee94069992318ba57c1f1494363a208bf8" gracePeriod=30 Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.340339 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-e4ff-account-create-update-5zkr9"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.384608 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-e4ff-account-create-update-5zkr9"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.431880 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-2pj4h"] Feb 27 16:50:43 crc kubenswrapper[4751]: E0227 16:50:43.502909 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 16:50:43 crc kubenswrapper[4751]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[/bin/sh -c #!/bin/bash Feb 27 16:50:43 crc kubenswrapper[4751]: Feb 27 16:50:43 crc kubenswrapper[4751]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Feb 27 16:50:43 crc kubenswrapper[4751]: Feb 27 16:50:43 crc kubenswrapper[4751]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Feb 27 16:50:43 crc kubenswrapper[4751]: Feb 27 16:50:43 crc kubenswrapper[4751]: MYSQL_CMD="mysql -h -u root -P 3306" Feb 27 16:50:43 crc kubenswrapper[4751]: Feb 27 16:50:43 crc kubenswrapper[4751]: if [ -n "glance" ]; then Feb 27 16:50:43 crc kubenswrapper[4751]: GRANT_DATABASE="glance" Feb 27 16:50:43 crc kubenswrapper[4751]: else Feb 27 16:50:43 crc kubenswrapper[4751]: GRANT_DATABASE="*" Feb 27 16:50:43 crc kubenswrapper[4751]: fi Feb 27 16:50:43 crc kubenswrapper[4751]: Feb 27 16:50:43 crc kubenswrapper[4751]: # going for maximum compatibility here: Feb 27 16:50:43 crc kubenswrapper[4751]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Feb 27 16:50:43 crc kubenswrapper[4751]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Feb 27 16:50:43 crc kubenswrapper[4751]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Feb 27 16:50:43 crc kubenswrapper[4751]: # support updates Feb 27 16:50:43 crc kubenswrapper[4751]: Feb 27 16:50:43 crc kubenswrapper[4751]: $MYSQL_CMD < logger="UnhandledError" Feb 27 16:50:43 crc kubenswrapper[4751]: E0227 16:50:43.504570 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"glance-db-secret\\\" not found\"" pod="openstack/glance-543a-account-create-update-mxr7p" podUID="6c4060a4-e264-4a4a-90ea-4a270cc50940" Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.547214 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-2pj4h"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.600521 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-d540-account-create-update-s467z"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.645135 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-fbdnl"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.671465 4751 generic.go:334] "Generic (PLEG): container finished" podID="3c5e58eb-31a4-4253-8cb9-a9486bb2d955" containerID="fe0b48cb4c4111dfc56e9cc80355b87b652df2aa7701be61b0f630ca7e55427a" exitCode=143 Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.671530 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5c5d5b6fdd-9d8xv" event={"ID":"3c5e58eb-31a4-4253-8cb9-a9486bb2d955","Type":"ContainerDied","Data":"fe0b48cb4c4111dfc56e9cc80355b87b652df2aa7701be61b0f630ca7e55427a"} Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.672907 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-xcsrx_da8c688d-4446-4f25-853d-0f694094d0af/openstack-network-exporter/0.log" Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.672939 4751 generic.go:334] "Generic (PLEG): container finished" podID="da8c688d-4446-4f25-853d-0f694094d0af" containerID="d795f9fa74378811b1eaa8d00254a0ee94069992318ba57c1f1494363a208bf8" exitCode=2 Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.672971 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-xcsrx" event={"ID":"da8c688d-4446-4f25-853d-0f694094d0af","Type":"ContainerDied","Data":"d795f9fa74378811b1eaa8d00254a0ee94069992318ba57c1f1494363a208bf8"} Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.689706 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-fbdnl"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.701689 4751 generic.go:334] "Generic (PLEG): container finished" podID="ef097fe8-b372-4175-a5be-15fbb62905c9" containerID="ff7df5badcf4d604fc8a4002a9fdc2e25bffaf7333ce4b9ec101b40ca09bbc07" exitCode=0 Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.701993 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" event={"ID":"ef097fe8-b372-4175-a5be-15fbb62905c9","Type":"ContainerDied","Data":"ff7df5badcf4d604fc8a4002a9fdc2e25bffaf7333ce4b9ec101b40ca09bbc07"} Feb 27 16:50:43 crc kubenswrapper[4751]: E0227 16:50:43.715601 4751 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Feb 27 16:50:43 crc kubenswrapper[4751]: E0227 16:50:43.715872 4751 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-config-data podName:51a81c6a-6814-412d-b77d-e741f1f74446 nodeName:}" failed. No retries permitted until 2026-02-27 16:50:44.715857057 +0000 UTC m=+1606.862871504 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-config-data") pod "rabbitmq-server-0" (UID: "51a81c6a-6814-412d-b77d-e741f1f74446") : configmap "rabbitmq-config-data" not found Feb 27 16:50:43 crc kubenswrapper[4751]: E0227 16:50:43.716326 4751 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Feb 27 16:50:43 crc kubenswrapper[4751]: E0227 16:50:43.716443 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-config-data podName:cecf602c-dec2-40c6-922c-bf84b707b1b9 nodeName:}" failed. No retries permitted until 2026-02-27 16:50:45.716434452 +0000 UTC m=+1607.863448899 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-config-data") pod "rabbitmq-cell1-server-0" (UID: "cecf602c-dec2-40c6-922c-bf84b707b1b9") : configmap "rabbitmq-cell1-config-data" not found Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.737467 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-d540-account-create-update-s467z"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.742504 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_95b4a6cb-a957-4a31-8510-292eb1305ad6/ovsdbserver-nb/0.log" Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.742730 4751 generic.go:334] "Generic (PLEG): container finished" podID="95b4a6cb-a957-4a31-8510-292eb1305ad6" containerID="5f0141511ca3d3aa75b1878aa729a5715a4ce124f70bcdf3e79e44f61c356a32" exitCode=2 Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.745305 4751 generic.go:334] "Generic (PLEG): container finished" podID="95b4a6cb-a957-4a31-8510-292eb1305ad6" containerID="67b68970dcc70c2551fa94757bc51fc1016792d1261833505238d86e1d89cc24" exitCode=143 Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.742805 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"95b4a6cb-a957-4a31-8510-292eb1305ad6","Type":"ContainerDied","Data":"5f0141511ca3d3aa75b1878aa729a5715a4ce124f70bcdf3e79e44f61c356a32"} Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.745625 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"95b4a6cb-a957-4a31-8510-292eb1305ad6","Type":"ContainerDied","Data":"67b68970dcc70c2551fa94757bc51fc1016792d1261833505238d86e1d89cc24"} Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.750559 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-6s84l"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.753551 4751 generic.go:334] "Generic (PLEG): container finished" podID="36495e7a-b8f8-4d54-a504-e92bb6211327" containerID="8f37b9a53b57fd59b8d193823dd9bac3b95253b3c09ec6d44395ab006d4399e8" exitCode=2 Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.753612 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" 
event={"ID":"36495e7a-b8f8-4d54-a504-e92bb6211327","Type":"ContainerDied","Data":"8f37b9a53b57fd59b8d193823dd9bac3b95253b3c09ec6d44395ab006d4399e8"} Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.792922 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-6s84l"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.797118 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-c2k4l"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.811464 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-c2k4l"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.814320 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_438372fd-dcc8-47e3-a547-c8a1729b2f1f/ovsdbserver-sb/0.log" Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.814358 4751 generic.go:334] "Generic (PLEG): container finished" podID="438372fd-dcc8-47e3-a547-c8a1729b2f1f" containerID="21f622d5f20191c0f88a55bea7b29298d2743cf401c56a29ef7e4407b0fd82c6" exitCode=143 Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.814435 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"438372fd-dcc8-47e3-a547-c8a1729b2f1f","Type":"ContainerDied","Data":"21f622d5f20191c0f88a55bea7b29298d2743cf401c56a29ef7e4407b0fd82c6"} Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.819240 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-aa96-account-create-update-d2dtn"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.820248 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-543a-account-create-update-mxr7p" event={"ID":"6c4060a4-e264-4a4a-90ea-4a270cc50940","Type":"ContainerStarted","Data":"a58242e3dae08117756e1572de969d04e33194f3205cdf4599e5cf0a72b2f8de"} Feb 27 16:50:43 crc kubenswrapper[4751]: E0227 16:50:43.825245 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 16:50:43 crc kubenswrapper[4751]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[/bin/sh -c #!/bin/bash Feb 27 16:50:43 crc kubenswrapper[4751]: Feb 27 16:50:43 crc kubenswrapper[4751]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Feb 27 16:50:43 crc kubenswrapper[4751]: Feb 27 16:50:43 crc kubenswrapper[4751]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Feb 27 16:50:43 crc kubenswrapper[4751]: Feb 27 16:50:43 crc kubenswrapper[4751]: MYSQL_CMD="mysql -h -u root -P 3306" Feb 27 16:50:43 crc kubenswrapper[4751]: Feb 27 16:50:43 crc kubenswrapper[4751]: if [ -n "glance" ]; then Feb 27 16:50:43 crc kubenswrapper[4751]: GRANT_DATABASE="glance" Feb 27 16:50:43 crc kubenswrapper[4751]: else Feb 27 16:50:43 crc kubenswrapper[4751]: GRANT_DATABASE="*" Feb 27 16:50:43 crc kubenswrapper[4751]: fi Feb 27 16:50:43 crc kubenswrapper[4751]: Feb 27 16:50:43 crc kubenswrapper[4751]: # going for maximum compatibility here: Feb 27 16:50:43 crc kubenswrapper[4751]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Feb 27 16:50:43 crc kubenswrapper[4751]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Feb 27 16:50:43 crc kubenswrapper[4751]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Feb 27 16:50:43 crc kubenswrapper[4751]: # support updates Feb 27 16:50:43 crc kubenswrapper[4751]: Feb 27 16:50:43 crc kubenswrapper[4751]: $MYSQL_CMD < logger="UnhandledError" Feb 27 16:50:43 crc kubenswrapper[4751]: E0227 16:50:43.826616 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"glance-db-secret\\\" not found\"" pod="openstack/glance-543a-account-create-update-mxr7p" podUID="6c4060a4-e264-4a4a-90ea-4a270cc50940" Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.831122 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-sz5bd"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.839444 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-sz5bd"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.860095 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-aa96-account-create-update-d2dtn"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.860101 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-c1fb-account-create-update-zkj9t" Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.929612 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-tfsfv"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.935522 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-tfsfv"] Feb 27 16:50:43 crc kubenswrapper[4751]: I0227 16:50:43.970676 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-pzgfd"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.030968 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-pzgfd"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.057066 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.070009 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="account-server" containerID="cri-o://3d28978244e9cafd7b1ee3ade5f195d2ae28102706cdb9083ebd620acc9c5453" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.070614 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="swift-recon-cron" containerID="cri-o://5d9b075940c17ccffe3c35e09be4bf03a3f95fc97562c089e3ed06153ce12e22" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.070692 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="rsync" containerID="cri-o://de66ee2999fac29e22b4821b042f3b6c8bcd8af2215e1895604af96a423ffd6d" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.070751 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="object-expirer" containerID="cri-o://8d3c817e96059d70b12ef7286c0552c07aa2499059b8053476c3d15e8305625f" gracePeriod=30 Feb 27 16:50:44 crc 
kubenswrapper[4751]: I0227 16:50:44.070801 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="object-updater" containerID="cri-o://83ce35274bb0792c55293b6011d40b169da183543e50dad4bd7e201d6f6a7146" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.070846 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="object-auditor" containerID="cri-o://23f3e1c514c8b828bd9ae5bb87e214f223a4588483efcabd156863e7581c145e" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.070890 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="object-replicator" containerID="cri-o://d9d8a8e1e05ef6e1c7673a877cb8fbfae2175606b7184f573f2d5f77b2fb1d24" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.070932 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="object-server" containerID="cri-o://4ca99524eea4a99a550fc63f10a9bcbfd3c3e4a41fa126a72fb0140d5e9d14f0" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.070977 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="container-updater" containerID="cri-o://e79d296ac1de0b104dda270fbeebf21a4f77921165bf8426ffb98a3ab9fd68bd" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.071027 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="container-auditor" containerID="cri-o://b93427209a64b6f4800316cc95e813dbfe839f3c1cc330375575973fc2bd09ba" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.071076 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="container-replicator" containerID="cri-o://681fe80f14cb936ac1603940896731427ac9c788d645cc1ee250520ef0030927" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.071129 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="container-server" containerID="cri-o://3e77e6f4a377245e2e374c8bf467f0ec059e1247d85be6db7d91b8496e308e18" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.071168 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="account-reaper" containerID="cri-o://3781fd0007798258c29970e2eca3675df69d6304df681043d13fc310c53b5b2d" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.071448 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="account-replicator" containerID="cri-o://4cafaf593ca2edd1eaf7ce55b2075b944bb67896f93d4fa2ddbe908cbb542c69" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.071203 
4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="account-auditor" containerID="cri-o://a303cad8ff9d037361da17d1a15f7f3b922522d9c253a7d490bbde1a81132839" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.138550 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.185338 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_95b4a6cb-a957-4a31-8510-292eb1305ad6/ovsdbserver-nb/0.log" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.185433 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.240627 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-543a-account-create-update-mxr7p"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.270272 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-ovsdbserver-nb\") pod \"ef097fe8-b372-4175-a5be-15fbb62905c9\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.273286 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rglq9\" (UniqueName: \"kubernetes.io/projected/ef097fe8-b372-4175-a5be-15fbb62905c9-kube-api-access-rglq9\") pod \"ef097fe8-b372-4175-a5be-15fbb62905c9\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.274577 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-dns-swift-storage-0\") pod \"ef097fe8-b372-4175-a5be-15fbb62905c9\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.275634 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-config\") pod \"ef097fe8-b372-4175-a5be-15fbb62905c9\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.275786 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-ovsdbserver-sb\") pod \"ef097fe8-b372-4175-a5be-15fbb62905c9\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.275943 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-dns-svc\") pod \"ef097fe8-b372-4175-a5be-15fbb62905c9\" (UID: \"ef097fe8-b372-4175-a5be-15fbb62905c9\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.280481 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef097fe8-b372-4175-a5be-15fbb62905c9-kube-api-access-rglq9" (OuterVolumeSpecName: "kube-api-access-rglq9") pod "ef097fe8-b372-4175-a5be-15fbb62905c9" (UID: "ef097fe8-b372-4175-a5be-15fbb62905c9"). 
InnerVolumeSpecName "kube-api-access-rglq9". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.313608 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-649c97d5df-x4tkf"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.314163 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-649c97d5df-x4tkf" podUID="16754588-ca23-484b-b8e8-21bc94c640f3" containerName="neutron-api" containerID="cri-o://a608ca0fcc607ebcc4925dc217e870cd065fd09df04f7caf9fd6c4671876c01c" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.314721 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-649c97d5df-x4tkf" podUID="16754588-ca23-484b-b8e8-21bc94c640f3" containerName="neutron-httpd" containerID="cri-o://8cae1a6a519f4d8cb3bd285b3e459c6d60a2234a0f23af55f5a0cf07199403d7" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.335135 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.335421 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="2ad24b50-556b-4799-a598-b7618c1664fd" containerName="glance-log" containerID="cri-o://e818dd36bb380e38ca7e8c06d9f356dc7568cbb2837fa401aecb68a64720dc8e" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.335566 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="2ad24b50-556b-4799-a598-b7618c1664fd" containerName="glance-httpd" containerID="cri-o://784dd66d4d1ab2b759ae49b26768db9fa74a32ff3519e2e85b622211356464e8" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.347417 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ef097fe8-b372-4175-a5be-15fbb62905c9" (UID: "ef097fe8-b372-4175-a5be-15fbb62905c9"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.358654 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-f6xg8"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.368017 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-f6xg8"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.373746 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ef097fe8-b372-4175-a5be-15fbb62905c9" (UID: "ef097fe8-b372-4175-a5be-15fbb62905c9"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.376710 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.377081 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="23d98e0b-8d21-4ad9-b3a4-716c1d221949" containerName="cinder-scheduler" containerID="cri-o://975963e810405e7a1f164ed08541517bb44532e23c9b968a1511ad894a22d948" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.377578 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="23d98e0b-8d21-4ad9-b3a4-716c1d221949" containerName="probe" containerID="cri-o://2ecbec27a7197208f58327c1b614eb58bf364a81b50228cd0b0b7068505b1049" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.380226 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/95b4a6cb-a957-4a31-8510-292eb1305ad6-ovsdb-rundir\") pod \"95b4a6cb-a957-4a31-8510-292eb1305ad6\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.380266 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95b4a6cb-a957-4a31-8510-292eb1305ad6-combined-ca-bundle\") pod \"95b4a6cb-a957-4a31-8510-292eb1305ad6\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.380305 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/95b4a6cb-a957-4a31-8510-292eb1305ad6-ovsdbserver-nb-tls-certs\") pod \"95b4a6cb-a957-4a31-8510-292eb1305ad6\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.380326 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/95b4a6cb-a957-4a31-8510-292eb1305ad6-scripts\") pod \"95b4a6cb-a957-4a31-8510-292eb1305ad6\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.380347 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95b4a6cb-a957-4a31-8510-292eb1305ad6-config\") pod \"95b4a6cb-a957-4a31-8510-292eb1305ad6\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.380517 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-js87m\" (UniqueName: \"kubernetes.io/projected/95b4a6cb-a957-4a31-8510-292eb1305ad6-kube-api-access-js87m\") pod \"95b4a6cb-a957-4a31-8510-292eb1305ad6\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.380553 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/95b4a6cb-a957-4a31-8510-292eb1305ad6-metrics-certs-tls-certs\") pod \"95b4a6cb-a957-4a31-8510-292eb1305ad6\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.380632 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"ovndbcluster-nb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"95b4a6cb-a957-4a31-8510-292eb1305ad6\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.380847 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/95b4a6cb-a957-4a31-8510-292eb1305ad6-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "95b4a6cb-a957-4a31-8510-292eb1305ad6" (UID: "95b4a6cb-a957-4a31-8510-292eb1305ad6"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.380997 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rglq9\" (UniqueName: \"kubernetes.io/projected/ef097fe8-b372-4175-a5be-15fbb62905c9-kube-api-access-rglq9\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.381021 4751 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.381030 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/95b4a6cb-a957-4a31-8510-292eb1305ad6-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.381043 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.381231 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/95b4a6cb-a957-4a31-8510-292eb1305ad6-scripts" (OuterVolumeSpecName: "scripts") pod "95b4a6cb-a957-4a31-8510-292eb1305ad6" (UID: "95b4a6cb-a957-4a31-8510-292eb1305ad6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.381689 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/95b4a6cb-a957-4a31-8510-292eb1305ad6-config" (OuterVolumeSpecName: "config") pod "95b4a6cb-a957-4a31-8510-292eb1305ad6" (UID: "95b4a6cb-a957-4a31-8510-292eb1305ad6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.390502 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-frvvc" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerName="ovs-vswitchd" containerID="cri-o://3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9" gracePeriod=29 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.390637 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-f47b-account-create-update-nwbj8"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.395539 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "ovndbcluster-nb-etc-ovn") pod "95b4a6cb-a957-4a31-8510-292eb1305ad6" (UID: "95b4a6cb-a957-4a31-8510-292eb1305ad6"). InnerVolumeSpecName "local-storage09-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.397247 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ef097fe8-b372-4175-a5be-15fbb62905c9" (UID: "ef097fe8-b372-4175-a5be-15fbb62905c9"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.399370 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95b4a6cb-a957-4a31-8510-292eb1305ad6-kube-api-access-js87m" (OuterVolumeSpecName: "kube-api-access-js87m") pod "95b4a6cb-a957-4a31-8510-292eb1305ad6" (UID: "95b4a6cb-a957-4a31-8510-292eb1305ad6"). InnerVolumeSpecName "kube-api-access-js87m". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.401350 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-f47b-account-create-update-nwbj8"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.417061 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.417339 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="ef932397-22e9-4d46-90e3-57076299d4cf" containerName="glance-log" containerID="cri-o://d6c9a2d485d12adf1118656837947a9c97a35defd0392ad64dca5800e77a1603" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.417529 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="ef932397-22e9-4d46-90e3-57076299d4cf" containerName="glance-httpd" containerID="cri-o://2845974abbc25e68928a72daeb08093bf2536ab0bc6998e59ff8fa1ec52eba91" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.427614 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.427889 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="27f559b3-2c7d-4567-b836-702db66d74ae" containerName="cinder-api-log" containerID="cri-o://1605ce034c9a31b1ba7385475f66a6cf4c1eed04b5e224929bfc6e00a7735ec1" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.427924 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="27f559b3-2c7d-4567-b836-702db66d74ae" containerName="cinder-api" containerID="cri-o://fa81b9ef2d02d79bb411ca1a7c2d1c560865c6e665f8780f4d1d418fefd52da9" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.441901 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.467506 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ef097fe8-b372-4175-a5be-15fbb62905c9" (UID: "ef097fe8-b372-4175-a5be-15fbb62905c9"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.474624 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-config" (OuterVolumeSpecName: "config") pod "ef097fe8-b372-4175-a5be-15fbb62905c9" (UID: "ef097fe8-b372-4175-a5be-15fbb62905c9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.483073 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-qxvzf"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.484148 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95b4a6cb-a957-4a31-8510-292eb1305ad6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "95b4a6cb-a957-4a31-8510-292eb1305ad6" (UID: "95b4a6cb-a957-4a31-8510-292eb1305ad6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.484541 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95b4a6cb-a957-4a31-8510-292eb1305ad6-combined-ca-bundle\") pod \"95b4a6cb-a957-4a31-8510-292eb1305ad6\" (UID: \"95b4a6cb-a957-4a31-8510-292eb1305ad6\") " Feb 27 16:50:44 crc kubenswrapper[4751]: W0227 16:50:44.484668 4751 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/95b4a6cb-a957-4a31-8510-292eb1305ad6/volumes/kubernetes.io~secret/combined-ca-bundle Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.484690 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95b4a6cb-a957-4a31-8510-292eb1305ad6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "95b4a6cb-a957-4a31-8510-292eb1305ad6" (UID: "95b4a6cb-a957-4a31-8510-292eb1305ad6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.485272 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-js87m\" (UniqueName: \"kubernetes.io/projected/95b4a6cb-a957-4a31-8510-292eb1305ad6-kube-api-access-js87m\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.485293 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.485303 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.485312 4751 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef097fe8-b372-4175-a5be-15fbb62905c9-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.485329 4751 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.485338 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95b4a6cb-a957-4a31-8510-292eb1305ad6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.485348 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/95b4a6cb-a957-4a31-8510-292eb1305ad6-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.485355 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95b4a6cb-a957-4a31-8510-292eb1305ad6-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.486705 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_438372fd-dcc8-47e3-a547-c8a1729b2f1f/ovsdbserver-sb/0.log" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.486776 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.492186 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-qxvzf"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.498840 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-543a-account-create-update-mxr7p"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.512387 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.512675 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="68bcf8b3-a271-47f0-9815-17cd3fdaec3e" containerName="nova-metadata-log" containerID="cri-o://efdfb1b8b12751d73f4a5fcda2b86dfb5e8bc636338103308612ebd200159f20" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.512904 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="68bcf8b3-a271-47f0-9815-17cd3fdaec3e" containerName="nova-metadata-metadata" containerID="cri-o://95dbd9481ab5bf19368b4e9a5596159862e13485f791a125ad43cdef90030854" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.518171 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-d9bcd5f6c-zlj75"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.518393 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-d9bcd5f6c-zlj75" podUID="a0840d34-f0f3-4bfd-a33c-29cc1e268586" containerName="barbican-worker-log" containerID="cri-o://42c57c2184b87bf3c9a09b69ad34b6040b0460f9d28d81ed2e8ed7ba354172eb" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.518575 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-d9bcd5f6c-zlj75" podUID="a0840d34-f0f3-4bfd-a33c-29cc1e268586" containerName="barbican-worker" containerID="cri-o://72fa8d6456780e0ea6b18871704b4dde8c21cb82793e3e3a0122013c628913fe" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.528371 4751 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.535224 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0404436e-b5d2-4743-9d81-5bd0bab5b1d5" path="/var/lib/kubelet/pods/0404436e-b5d2-4743-9d81-5bd0bab5b1d5/volumes" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.550967 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="cecf602c-dec2-40c6-922c-bf84b707b1b9" containerName="rabbitmq" containerID="cri-o://549fd5c24da2dfcd4fa0ba0f62c30ff6278b4f64c3189582850edb5093bc8b67" gracePeriod=604800 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.563095 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95b4a6cb-a957-4a31-8510-292eb1305ad6-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "95b4a6cb-a957-4a31-8510-292eb1305ad6" (UID: "95b4a6cb-a957-4a31-8510-292eb1305ad6"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.573855 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10cb8075-6c76-438d-8ba7-cacfb6acd7fe" path="/var/lib/kubelet/pods/10cb8075-6c76-438d-8ba7-cacfb6acd7fe/volumes" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.574952 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="292dc6c8-2b07-4546-acc2-8cc465c17d4f" path="/var/lib/kubelet/pods/292dc6c8-2b07-4546-acc2-8cc465c17d4f/volumes" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.583752 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39d227f2-e298-4b47-892b-a9a58e73b3d0" path="/var/lib/kubelet/pods/39d227f2-e298-4b47-892b-a9a58e73b3d0/volumes" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.592481 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51f9db5a-ab58-4795-b09f-c2df5406c0cf" path="/var/lib/kubelet/pods/51f9db5a-ab58-4795-b09f-c2df5406c0cf/volumes" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.586725 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/438372fd-dcc8-47e3-a547-c8a1729b2f1f-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "438372fd-dcc8-47e3-a547-c8a1729b2f1f" (UID: "438372fd-dcc8-47e3-a547-c8a1729b2f1f"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.586196 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/438372fd-dcc8-47e3-a547-c8a1729b2f1f-ovsdb-rundir\") pod \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.596657 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/438372fd-dcc8-47e3-a547-c8a1729b2f1f-scripts\") pod \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.596686 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/438372fd-dcc8-47e3-a547-c8a1729b2f1f-combined-ca-bundle\") pod \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.596720 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/438372fd-dcc8-47e3-a547-c8a1729b2f1f-metrics-certs-tls-certs\") pod \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.596750 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-sb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.596837 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/438372fd-dcc8-47e3-a547-c8a1729b2f1f-ovsdbserver-sb-tls-certs\") pod 
\"438372fd-dcc8-47e3-a547-c8a1729b2f1f\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.596859 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hwntj\" (UniqueName: \"kubernetes.io/projected/438372fd-dcc8-47e3-a547-c8a1729b2f1f-kube-api-access-hwntj\") pod \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.596881 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/438372fd-dcc8-47e3-a547-c8a1729b2f1f-config\") pod \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\" (UID: \"438372fd-dcc8-47e3-a547-c8a1729b2f1f\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.597725 4751 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/95b4a6cb-a957-4a31-8510-292eb1305ad6-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.597742 4751 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.597750 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/438372fd-dcc8-47e3-a547-c8a1729b2f1f-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.602445 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/438372fd-dcc8-47e3-a547-c8a1729b2f1f-config" (OuterVolumeSpecName: "config") pod "438372fd-dcc8-47e3-a547-c8a1729b2f1f" (UID: "438372fd-dcc8-47e3-a547-c8a1729b2f1f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.604577 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e4eeeef-55a2-4656-ada6-c653949d6b7f" path="/var/lib/kubelet/pods/6e4eeeef-55a2-4656-ada6-c653949d6b7f/volumes" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.605345 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80c6b259-7f53-44bc-9230-adeacd7d9cf6" path="/var/lib/kubelet/pods/80c6b259-7f53-44bc-9230-adeacd7d9cf6/volumes" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.605868 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81347661-57c9-48d8-8b26-c6eddbfe887c" path="/var/lib/kubelet/pods/81347661-57c9-48d8-8b26-c6eddbfe887c/volumes" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.606378 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88022d82-f750-458c-8f97-a34d3eaef634" path="/var/lib/kubelet/pods/88022d82-f750-458c-8f97-a34d3eaef634/volumes" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.612169 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae" path="/var/lib/kubelet/pods/a4e28eb4-4e14-4cc7-b2be-a5cce68dfeae/volumes" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.613105 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac13e063-9022-462c-9f6c-0c73828106f8" path="/var/lib/kubelet/pods/ac13e063-9022-462c-9f6c-0c73828106f8/volumes" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.615109 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd714706-b63d-4d97-b9df-8ac662e9dfb0" path="/var/lib/kubelet/pods/bd714706-b63d-4d97-b9df-8ac662e9dfb0/volumes" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.618717 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/438372fd-dcc8-47e3-a547-c8a1729b2f1f-scripts" (OuterVolumeSpecName: "scripts") pod "438372fd-dcc8-47e3-a547-c8a1729b2f1f" (UID: "438372fd-dcc8-47e3-a547-c8a1729b2f1f"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.619806 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0298748-d6b6-46e7-a34d-381cf00a4aed" path="/var/lib/kubelet/pods/c0298748-d6b6-46e7-a34d-381cf00a4aed/volumes" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.620392 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2c399bf-5a40-4e29-9056-60b030211a97" path="/var/lib/kubelet/pods/e2c399bf-5a40-4e29-9056-60b030211a97/volumes" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.620927 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5af617b-32bc-43a9-a8e0-6bb1fec1b4df" path="/var/lib/kubelet/pods/f5af617b-32bc-43a9-a8e0-6bb1fec1b4df/volumes" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.625467 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc32e789-24cd-4056-ae5d-a52e12c03df1" path="/var/lib/kubelet/pods/fc32e789-24cd-4056-ae5d-a52e12c03df1/volumes" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.627104 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/438372fd-dcc8-47e3-a547-c8a1729b2f1f-kube-api-access-hwntj" (OuterVolumeSpecName: "kube-api-access-hwntj") pod "438372fd-dcc8-47e3-a547-c8a1729b2f1f" (UID: "438372fd-dcc8-47e3-a547-c8a1729b2f1f"). InnerVolumeSpecName "kube-api-access-hwntj". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.636386 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "ovndbcluster-sb-etc-ovn") pod "438372fd-dcc8-47e3-a547-c8a1729b2f1f" (UID: "438372fd-dcc8-47e3-a547-c8a1729b2f1f"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.644599 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95b4a6cb-a957-4a31-8510-292eb1305ad6-ovsdbserver-nb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-nb-tls-certs") pod "95b4a6cb-a957-4a31-8510-292eb1305ad6" (UID: "95b4a6cb-a957-4a31-8510-292eb1305ad6"). InnerVolumeSpecName "ovsdbserver-nb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.660755 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/438372fd-dcc8-47e3-a547-c8a1729b2f1f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "438372fd-dcc8-47e3-a547-c8a1729b2f1f" (UID: "438372fd-dcc8-47e3-a547-c8a1729b2f1f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: E0227 16:50:44.676669 4751 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< Feb 27 16:50:44 crc kubenswrapper[4751]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Feb 27 16:50:44 crc kubenswrapper[4751]: + source /usr/local/bin/container-scripts/functions Feb 27 16:50:44 crc kubenswrapper[4751]: ++ OVNBridge=br-int Feb 27 16:50:44 crc kubenswrapper[4751]: ++ OVNRemote=tcp:localhost:6642 Feb 27 16:50:44 crc kubenswrapper[4751]: ++ OVNEncapType=geneve Feb 27 16:50:44 crc kubenswrapper[4751]: ++ OVNAvailabilityZones= Feb 27 16:50:44 crc kubenswrapper[4751]: ++ EnableChassisAsGateway=true Feb 27 16:50:44 crc kubenswrapper[4751]: ++ PhysicalNetworks= Feb 27 16:50:44 crc kubenswrapper[4751]: ++ OVNHostName= Feb 27 16:50:44 crc kubenswrapper[4751]: ++ DB_FILE=/etc/openvswitch/conf.db Feb 27 16:50:44 crc kubenswrapper[4751]: ++ ovs_dir=/var/lib/openvswitch Feb 27 16:50:44 crc kubenswrapper[4751]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Feb 27 16:50:44 crc kubenswrapper[4751]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Feb 27 16:50:44 crc kubenswrapper[4751]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Feb 27 16:50:44 crc kubenswrapper[4751]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Feb 27 16:50:44 crc kubenswrapper[4751]: + sleep 0.5 Feb 27 16:50:44 crc kubenswrapper[4751]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Feb 27 16:50:44 crc kubenswrapper[4751]: + sleep 0.5 Feb 27 16:50:44 crc kubenswrapper[4751]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Feb 27 16:50:44 crc kubenswrapper[4751]: + cleanup_ovsdb_server_semaphore Feb 27 16:50:44 crc kubenswrapper[4751]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Feb 27 16:50:44 crc kubenswrapper[4751]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Feb 27 16:50:44 crc kubenswrapper[4751]: > execCommand=["/usr/local/bin/container-scripts/stop-ovsdb-server.sh"] containerName="ovsdb-server" pod="openstack/ovn-controller-ovs-frvvc" message=< Feb 27 16:50:44 crc kubenswrapper[4751]: Exiting ovsdb-server (5) [ OK ] Feb 27 16:50:44 crc kubenswrapper[4751]: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Feb 27 16:50:44 crc kubenswrapper[4751]: + source /usr/local/bin/container-scripts/functions Feb 27 16:50:44 crc kubenswrapper[4751]: ++ OVNBridge=br-int Feb 27 16:50:44 crc kubenswrapper[4751]: ++ OVNRemote=tcp:localhost:6642 Feb 27 16:50:44 crc kubenswrapper[4751]: ++ OVNEncapType=geneve Feb 27 16:50:44 crc kubenswrapper[4751]: ++ OVNAvailabilityZones= Feb 27 16:50:44 crc kubenswrapper[4751]: ++ EnableChassisAsGateway=true Feb 27 16:50:44 crc kubenswrapper[4751]: ++ PhysicalNetworks= Feb 27 16:50:44 crc kubenswrapper[4751]: ++ OVNHostName= Feb 27 16:50:44 crc kubenswrapper[4751]: ++ DB_FILE=/etc/openvswitch/conf.db Feb 27 16:50:44 crc kubenswrapper[4751]: ++ ovs_dir=/var/lib/openvswitch Feb 27 16:50:44 crc kubenswrapper[4751]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Feb 27 16:50:44 crc kubenswrapper[4751]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Feb 27 16:50:44 crc kubenswrapper[4751]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Feb 27 16:50:44 crc kubenswrapper[4751]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Feb 27 16:50:44 crc kubenswrapper[4751]: + sleep 0.5 Feb 27 16:50:44 crc kubenswrapper[4751]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Feb 27 16:50:44 crc kubenswrapper[4751]: + sleep 0.5 Feb 27 16:50:44 crc kubenswrapper[4751]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Feb 27 16:50:44 crc kubenswrapper[4751]: + cleanup_ovsdb_server_semaphore Feb 27 16:50:44 crc kubenswrapper[4751]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Feb 27 16:50:44 crc kubenswrapper[4751]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Feb 27 16:50:44 crc kubenswrapper[4751]: > Feb 27 16:50:44 crc kubenswrapper[4751]: E0227 16:50:44.676706 4751 kuberuntime_container.go:691] "PreStop hook failed" err=< Feb 27 16:50:44 crc kubenswrapper[4751]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Feb 27 16:50:44 crc kubenswrapper[4751]: + source /usr/local/bin/container-scripts/functions Feb 27 16:50:44 crc kubenswrapper[4751]: ++ OVNBridge=br-int Feb 27 16:50:44 crc kubenswrapper[4751]: ++ OVNRemote=tcp:localhost:6642 Feb 27 16:50:44 crc kubenswrapper[4751]: ++ OVNEncapType=geneve Feb 27 16:50:44 crc kubenswrapper[4751]: ++ OVNAvailabilityZones= Feb 27 16:50:44 crc kubenswrapper[4751]: ++ EnableChassisAsGateway=true Feb 27 16:50:44 crc kubenswrapper[4751]: ++ PhysicalNetworks= Feb 27 16:50:44 crc kubenswrapper[4751]: ++ OVNHostName= Feb 27 16:50:44 crc kubenswrapper[4751]: ++ DB_FILE=/etc/openvswitch/conf.db Feb 27 16:50:44 crc kubenswrapper[4751]: ++ ovs_dir=/var/lib/openvswitch Feb 27 16:50:44 crc kubenswrapper[4751]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Feb 27 16:50:44 crc kubenswrapper[4751]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Feb 27 16:50:44 crc kubenswrapper[4751]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Feb 27 16:50:44 crc kubenswrapper[4751]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Feb 27 16:50:44 crc kubenswrapper[4751]: + sleep 0.5 Feb 27 16:50:44 crc kubenswrapper[4751]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Feb 27 16:50:44 crc kubenswrapper[4751]: + sleep 0.5 Feb 27 16:50:44 crc kubenswrapper[4751]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Feb 27 16:50:44 crc kubenswrapper[4751]: + cleanup_ovsdb_server_semaphore Feb 27 16:50:44 crc kubenswrapper[4751]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Feb 27 16:50:44 crc kubenswrapper[4751]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Feb 27 16:50:44 crc kubenswrapper[4751]: > pod="openstack/ovn-controller-ovs-frvvc" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerName="ovsdb-server" containerID="cri-o://f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.676736 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-frvvc" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerName="ovsdb-server" containerID="cri-o://f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" gracePeriod=29 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.705755 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hwntj\" (UniqueName: \"kubernetes.io/projected/438372fd-dcc8-47e3-a547-c8a1729b2f1f-kube-api-access-hwntj\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.705781 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/438372fd-dcc8-47e3-a547-c8a1729b2f1f-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.705811 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/95b4a6cb-a957-4a31-8510-292eb1305ad6-ovsdbserver-nb-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.705822 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/438372fd-dcc8-47e3-a547-c8a1729b2f1f-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.705830 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/438372fd-dcc8-47e3-a547-c8a1729b2f1f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.705850 4751 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.812373 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-ng9tv"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.812430 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-ng9tv"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.812457 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.812473 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-748c66fdb6-xsx5t"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.812485 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-55c754cd9d-n8xn9"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.812503 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-pnm6t"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 
16:50:44.812524 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-pnm6t"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.812534 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-fb4c-account-create-update-7hrjf"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.812546 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.812557 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-gw6w6"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.812581 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-h4kdv"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.812598 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-gw6w6"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.812614 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-h4kdv"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.812628 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-d758-account-create-update-jpvqv"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.812648 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-j2chq"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.812657 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-j2chq"] Feb 27 16:50:44 crc kubenswrapper[4751]: E0227 16:50:44.818248 4751 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Feb 27 16:50:44 crc kubenswrapper[4751]: E0227 16:50:44.818314 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-config-data podName:51a81c6a-6814-412d-b77d-e741f1f74446 nodeName:}" failed. No retries permitted until 2026-02-27 16:50:46.818298382 +0000 UTC m=+1608.965312829 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-config-data") pod "rabbitmq-server-0" (UID: "51a81c6a-6814-412d-b77d-e741f1f74446") : configmap "rabbitmq-config-data" not found Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.821420 4751 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.822779 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="1a8f14c4-f8bc-4247-b2a2-72aa4801adfa" containerName="nova-api-log" containerID="cri-o://7093fc3fe4d41f6bca93c56cdb9de8d375834320491b81f1d7f637c564ea6641" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.823065 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-748c66fdb6-xsx5t" podUID="f9ca6eb2-820e-49ea-80ca-bd0e352d4243" containerName="barbican-api-log" containerID="cri-o://66ec49a151bde81e12512fb05eabd11d784e82af2fa19e9c977a0f218bb55c6d" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.823256 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" podUID="a9f1619e-893b-4f17-b105-214ccbf6385e" containerName="barbican-keystone-listener-log" containerID="cri-o://b8d92b7b3132116dd0110c81240f49261aa50bec396280f512da572100bafb6d" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.823637 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="1a8f14c4-f8bc-4247-b2a2-72aa4801adfa" containerName="nova-api-api" containerID="cri-o://a91412d1338cfce1b6aed60bd52a679afa01513653b047734294840a9a916ff5" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.823714 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-748c66fdb6-xsx5t" podUID="f9ca6eb2-820e-49ea-80ca-bd0e352d4243" containerName="barbican-api" containerID="cri-o://9fd5df7074b1fc7b9bf2a447c5d88215370bd4201e24afb0b45b856f50e14328" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.823758 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" podUID="a9f1619e-893b-4f17-b105-214ccbf6385e" containerName="barbican-keystone-listener" containerID="cri-o://cb50b4038b52526d094348a64713dcefe6038b082abf010e1ec5590d50b6e67e" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.848366 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/438372fd-dcc8-47e3-a547-c8a1729b2f1f-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "438372fd-dcc8-47e3-a547-c8a1729b2f1f" (UID: "438372fd-dcc8-47e3-a547-c8a1729b2f1f"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.848885 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/438372fd-dcc8-47e3-a547-c8a1729b2f1f-ovsdbserver-sb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-sb-tls-certs") pod "438372fd-dcc8-47e3-a547-c8a1729b2f1f" (UID: "438372fd-dcc8-47e3-a547-c8a1729b2f1f"). 
InnerVolumeSpecName "ovsdbserver-sb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.875365 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-xcsrx_da8c688d-4446-4f25-853d-0f694094d0af/openstack-network-exporter/0.log" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.875451 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-xcsrx" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.918753 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rbjws\" (UniqueName: \"kubernetes.io/projected/da8c688d-4446-4f25-853d-0f694094d0af-kube-api-access-rbjws\") pod \"da8c688d-4446-4f25-853d-0f694094d0af\" (UID: \"da8c688d-4446-4f25-853d-0f694094d0af\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.918799 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/da8c688d-4446-4f25-853d-0f694094d0af-ovs-rundir\") pod \"da8c688d-4446-4f25-853d-0f694094d0af\" (UID: \"da8c688d-4446-4f25-853d-0f694094d0af\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.918829 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/da8c688d-4446-4f25-853d-0f694094d0af-metrics-certs-tls-certs\") pod \"da8c688d-4446-4f25-853d-0f694094d0af\" (UID: \"da8c688d-4446-4f25-853d-0f694094d0af\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.918889 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/da8c688d-4446-4f25-853d-0f694094d0af-ovn-rundir\") pod \"da8c688d-4446-4f25-853d-0f694094d0af\" (UID: \"da8c688d-4446-4f25-853d-0f694094d0af\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.918921 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da8c688d-4446-4f25-853d-0f694094d0af-combined-ca-bundle\") pod \"da8c688d-4446-4f25-853d-0f694094d0af\" (UID: \"da8c688d-4446-4f25-853d-0f694094d0af\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.918997 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da8c688d-4446-4f25-853d-0f694094d0af-config\") pod \"da8c688d-4446-4f25-853d-0f694094d0af\" (UID: \"da8c688d-4446-4f25-853d-0f694094d0af\") " Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.919018 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/da8c688d-4446-4f25-853d-0f694094d0af-ovs-rundir" (OuterVolumeSpecName: "ovs-rundir") pod "da8c688d-4446-4f25-853d-0f694094d0af" (UID: "da8c688d-4446-4f25-853d-0f694094d0af"). InnerVolumeSpecName "ovs-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.919465 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/da8c688d-4446-4f25-853d-0f694094d0af-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "da8c688d-4446-4f25-853d-0f694094d0af" (UID: "da8c688d-4446-4f25-853d-0f694094d0af"). InnerVolumeSpecName "ovn-rundir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.921704 4751 generic.go:334] "Generic (PLEG): container finished" podID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerID="f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" exitCode=0 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.921802 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-frvvc" event={"ID":"a888fc6d-a4cc-4bc8-bca1-dafdfed15274","Type":"ContainerDied","Data":"f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47"} Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.924510 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da8c688d-4446-4f25-853d-0f694094d0af-config" (OuterVolumeSpecName: "config") pod "da8c688d-4446-4f25-853d-0f694094d0af" (UID: "da8c688d-4446-4f25-853d-0f694094d0af"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.925806 4751 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/438372fd-dcc8-47e3-a547-c8a1729b2f1f-ovsdbserver-sb-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.925829 4751 reconciler_common.go:293] "Volume detached for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/da8c688d-4446-4f25-853d-0f694094d0af-ovs-rundir\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.925839 4751 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/438372fd-dcc8-47e3-a547-c8a1729b2f1f-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.925847 4751 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.943896 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-c1fb-account-create-update-zkj9t"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.953128 4751 generic.go:334] "Generic (PLEG): container finished" podID="a0840d34-f0f3-4bfd-a33c-29cc1e268586" containerID="42c57c2184b87bf3c9a09b69ad34b6040b0460f9d28d81ed2e8ed7ba354172eb" exitCode=143 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.953247 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-d9bcd5f6c-zlj75" event={"ID":"a0840d34-f0f3-4bfd-a33c-29cc1e268586","Type":"ContainerDied","Data":"42c57c2184b87bf3c9a09b69ad34b6040b0460f9d28d81ed2e8ed7ba354172eb"} Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.981057 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.981492 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="47bf8499-97a6-4f76-8e2e-25b3fbff1d93" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://916390746d00d1b65c9ea5594a62aa426093bc9d7520f7d5c66283ffee926d58" gracePeriod=30 Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.982093 4751 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovn-controller-metrics-xcsrx_da8c688d-4446-4f25-853d-0f694094d0af/openstack-network-exporter/0.log" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.982144 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-xcsrx" event={"ID":"da8c688d-4446-4f25-853d-0f694094d0af","Type":"ContainerDied","Data":"bbdea870242c38adabac7f852cce023aaf80328526d522bae0e76cf888724c8a"} Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.982166 4751 scope.go:117] "RemoveContainer" containerID="d795f9fa74378811b1eaa8d00254a0ee94069992318ba57c1f1494363a208bf8" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.982240 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-xcsrx" Feb 27 16:50:44 crc kubenswrapper[4751]: I0227 16:50:44.993370 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.002544 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da8c688d-4446-4f25-853d-0f694094d0af-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "da8c688d-4446-4f25-853d-0f694094d0af" (UID: "da8c688d-4446-4f25-853d-0f694094d0af"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.003213 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-d6fpf"] Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.009028 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" event={"ID":"ef097fe8-b372-4175-a5be-15fbb62905c9","Type":"ContainerDied","Data":"254fbbb664fd0dce6336c71bf65cddac0792e25e5d22c657dfedad0fc2c77195"} Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.009127 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-4xmph" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.011162 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-d6fpf"] Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.018465 4751 scope.go:117] "RemoveContainer" containerID="23b962a77f710e938d8fdccbba41627ac2c63f6590f398c64a36726e1a306a60" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.018823 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-mcjmb"] Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.027259 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.027507 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="2d8d9ed0-8606-47cb-a164-7e6bbac390cd" containerName="nova-cell1-conductor-conductor" containerID="cri-o://2447590027e3c3985b0ff486fd0b56c2badc4c6132000ab0f491f2b1f773ddc6" gracePeriod=30 Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.029292 4751 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/da8c688d-4446-4f25-853d-0f694094d0af-ovn-rundir\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.029309 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da8c688d-4446-4f25-853d-0f694094d0af-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.029318 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da8c688d-4446-4f25-853d-0f694094d0af-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.032623 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da8c688d-4446-4f25-853d-0f694094d0af-kube-api-access-rbjws" (OuterVolumeSpecName: "kube-api-access-rbjws") pod "da8c688d-4446-4f25-853d-0f694094d0af" (UID: "da8c688d-4446-4f25-853d-0f694094d0af"). InnerVolumeSpecName "kube-api-access-rbjws". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.034461 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-mcjmb"] Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.050015 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.050221 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="f501d880-21be-44e3-b015-05b79e226279" containerName="nova-cell0-conductor-conductor" containerID="cri-o://da16950bef4c8761589f066404058c19ec5943530e74d8711c9cc18e707be9f5" gracePeriod=30 Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.081061 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-hvcjf"] Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087338 4751 generic.go:334] "Generic (PLEG): container finished" podID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerID="de66ee2999fac29e22b4821b042f3b6c8bcd8af2215e1895604af96a423ffd6d" exitCode=0 Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087372 4751 generic.go:334] "Generic (PLEG): container finished" podID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerID="8d3c817e96059d70b12ef7286c0552c07aa2499059b8053476c3d15e8305625f" exitCode=0 Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087385 4751 generic.go:334] "Generic (PLEG): container finished" podID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerID="83ce35274bb0792c55293b6011d40b169da183543e50dad4bd7e201d6f6a7146" exitCode=0 Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087395 4751 generic.go:334] "Generic (PLEG): container finished" podID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerID="23f3e1c514c8b828bd9ae5bb87e214f223a4588483efcabd156863e7581c145e" exitCode=0 Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087475 4751 generic.go:334] "Generic (PLEG): container finished" podID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerID="d9d8a8e1e05ef6e1c7673a877cb8fbfae2175606b7184f573f2d5f77b2fb1d24" exitCode=0 Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087486 4751 generic.go:334] "Generic (PLEG): container finished" podID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerID="4ca99524eea4a99a550fc63f10a9bcbfd3c3e4a41fa126a72fb0140d5e9d14f0" exitCode=0 Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087494 4751 generic.go:334] "Generic (PLEG): container finished" podID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerID="e79d296ac1de0b104dda270fbeebf21a4f77921165bf8426ffb98a3ab9fd68bd" exitCode=0 Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087502 4751 generic.go:334] "Generic (PLEG): container finished" podID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerID="b93427209a64b6f4800316cc95e813dbfe839f3c1cc330375575973fc2bd09ba" exitCode=0 Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087511 4751 generic.go:334] "Generic (PLEG): container finished" podID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerID="681fe80f14cb936ac1603940896731427ac9c788d645cc1ee250520ef0030927" exitCode=0 Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087519 4751 generic.go:334] "Generic (PLEG): container finished" podID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerID="3e77e6f4a377245e2e374c8bf467f0ec059e1247d85be6db7d91b8496e308e18" exitCode=0 Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087528 4751 
generic.go:334] "Generic (PLEG): container finished" podID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerID="3781fd0007798258c29970e2eca3675df69d6304df681043d13fc310c53b5b2d" exitCode=0 Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087538 4751 generic.go:334] "Generic (PLEG): container finished" podID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerID="a303cad8ff9d037361da17d1a15f7f3b922522d9c253a7d490bbde1a81132839" exitCode=0 Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087547 4751 generic.go:334] "Generic (PLEG): container finished" podID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerID="4cafaf593ca2edd1eaf7ce55b2075b944bb67896f93d4fa2ddbe908cbb542c69" exitCode=0 Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087556 4751 generic.go:334] "Generic (PLEG): container finished" podID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerID="3d28978244e9cafd7b1ee3ade5f195d2ae28102706cdb9083ebd620acc9c5453" exitCode=0 Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087609 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerDied","Data":"de66ee2999fac29e22b4821b042f3b6c8bcd8af2215e1895604af96a423ffd6d"} Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087640 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerDied","Data":"8d3c817e96059d70b12ef7286c0552c07aa2499059b8053476c3d15e8305625f"} Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087655 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerDied","Data":"83ce35274bb0792c55293b6011d40b169da183543e50dad4bd7e201d6f6a7146"} Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087667 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerDied","Data":"23f3e1c514c8b828bd9ae5bb87e214f223a4588483efcabd156863e7581c145e"} Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087679 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerDied","Data":"d9d8a8e1e05ef6e1c7673a877cb8fbfae2175606b7184f573f2d5f77b2fb1d24"} Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087693 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerDied","Data":"4ca99524eea4a99a550fc63f10a9bcbfd3c3e4a41fa126a72fb0140d5e9d14f0"} Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087705 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerDied","Data":"e79d296ac1de0b104dda270fbeebf21a4f77921165bf8426ffb98a3ab9fd68bd"} Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087718 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerDied","Data":"b93427209a64b6f4800316cc95e813dbfe839f3c1cc330375575973fc2bd09ba"} Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087729 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerDied","Data":"681fe80f14cb936ac1603940896731427ac9c788d645cc1ee250520ef0030927"} Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087740 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerDied","Data":"3e77e6f4a377245e2e374c8bf467f0ec059e1247d85be6db7d91b8496e308e18"} Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087754 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerDied","Data":"3781fd0007798258c29970e2eca3675df69d6304df681043d13fc310c53b5b2d"} Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087768 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerDied","Data":"a303cad8ff9d037361da17d1a15f7f3b922522d9c253a7d490bbde1a81132839"} Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087779 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerDied","Data":"4cafaf593ca2edd1eaf7ce55b2075b944bb67896f93d4fa2ddbe908cbb542c69"} Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.087792 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerDied","Data":"3d28978244e9cafd7b1ee3ade5f195d2ae28102706cdb9083ebd620acc9c5453"} Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.089884 4751 generic.go:334] "Generic (PLEG): container finished" podID="27f559b3-2c7d-4567-b836-702db66d74ae" containerID="1605ce034c9a31b1ba7385475f66a6cf4c1eed04b5e224929bfc6e00a7735ec1" exitCode=143 Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.089942 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"27f559b3-2c7d-4567-b836-702db66d74ae","Type":"ContainerDied","Data":"1605ce034c9a31b1ba7385475f66a6cf4c1eed04b5e224929bfc6e00a7735ec1"} Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.098111 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-hvcjf" event={"ID":"7dc9beed-8444-4389-8859-234af0090157","Type":"ContainerStarted","Data":"be8e4903b4c126bc47643cbadc02767416367166a4e0b07754ac034095ede4fb"} Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.101175 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_438372fd-dcc8-47e3-a547-c8a1729b2f1f/ovsdbserver-sb/0.log" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.101221 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"438372fd-dcc8-47e3-a547-c8a1729b2f1f","Type":"ContainerDied","Data":"e1c16cb0b8b15b9a2587d6ad0f42f523773b2f00301aa390b5128119547643bf"} Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.104527 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.118565 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.118897 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="7555b92d-c801-4da2-8d2e-78fa39c892d2" containerName="nova-scheduler-scheduler" containerID="cri-o://93a39ee11e77c8d1f29d5bc7f4d914ab63eb4c519c60efab540473556ec26c65" gracePeriod=30 Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.140762 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rbjws\" (UniqueName: \"kubernetes.io/projected/da8c688d-4446-4f25-853d-0f694094d0af-kube-api-access-rbjws\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.143722 4751 generic.go:334] "Generic (PLEG): container finished" podID="ef932397-22e9-4d46-90e3-57076299d4cf" containerID="d6c9a2d485d12adf1118656837947a9c97a35defd0392ad64dca5800e77a1603" exitCode=143 Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.143844 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ef932397-22e9-4d46-90e3-57076299d4cf","Type":"ContainerDied","Data":"d6c9a2d485d12adf1118656837947a9c97a35defd0392ad64dca5800e77a1603"} Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.178458 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-fb4c-account-create-update-7hrjf"] Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.192751 4751 generic.go:334] "Generic (PLEG): container finished" podID="16754588-ca23-484b-b8e8-21bc94c640f3" containerID="8cae1a6a519f4d8cb3bd285b3e459c6d60a2234a0f23af55f5a0cf07199403d7" exitCode=0 Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.192848 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-649c97d5df-x4tkf" event={"ID":"16754588-ca23-484b-b8e8-21bc94c640f3","Type":"ContainerDied","Data":"8cae1a6a519f4d8cb3bd285b3e459c6d60a2234a0f23af55f5a0cf07199403d7"} Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.198847 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-c1fb-account-create-update-zkj9t"] Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.211552 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-d758-account-create-update-jpvqv"] Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.226159 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_95b4a6cb-a957-4a31-8510-292eb1305ad6/ovsdbserver-nb/0.log" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.226232 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"95b4a6cb-a957-4a31-8510-292eb1305ad6","Type":"ContainerDied","Data":"d735ddd6fc4ae0cc68c0180d55cfe240e0a1db24d8be54690ffa21863e8eb509"} Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.226333 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.226679 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da8c688d-4446-4f25-853d-0f694094d0af-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "da8c688d-4446-4f25-853d-0f694094d0af" (UID: "da8c688d-4446-4f25-853d-0f694094d0af"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.243023 4751 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/da8c688d-4446-4f25-853d-0f694094d0af-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.249655 4751 generic.go:334] "Generic (PLEG): container finished" podID="68bcf8b3-a271-47f0-9815-17cd3fdaec3e" containerID="efdfb1b8b12751d73f4a5fcda2b86dfb5e8bc636338103308612ebd200159f20" exitCode=143 Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.249770 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"68bcf8b3-a271-47f0-9815-17cd3fdaec3e","Type":"ContainerDied","Data":"efdfb1b8b12751d73f4a5fcda2b86dfb5e8bc636338103308612ebd200159f20"} Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.271969 4751 generic.go:334] "Generic (PLEG): container finished" podID="176ca33f-0a66-4132-bdf1-4be84eba5b34" containerID="92998f2cdf2e9c4313aa0a3ab7697b4d34436e6d3fd22cb366615eb354f3f91a" exitCode=137 Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.272047 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="625bff4cc80cb2c66273975019f2115f60a8551f8bfcfd9f6c97ecc2ec9d0ae3" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.299003 4751 generic.go:334] "Generic (PLEG): container finished" podID="2ad24b50-556b-4799-a598-b7618c1664fd" containerID="e818dd36bb380e38ca7e8c06d9f356dc7568cbb2837fa401aecb68a64720dc8e" exitCode=143 Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.299021 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2ad24b50-556b-4799-a598-b7618c1664fd","Type":"ContainerDied","Data":"e818dd36bb380e38ca7e8c06d9f356dc7568cbb2837fa401aecb68a64720dc8e"} Feb 27 16:50:45 crc kubenswrapper[4751]: E0227 16:50:45.312294 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 16:50:45 crc kubenswrapper[4751]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[/bin/sh -c #!/bin/bash Feb 27 16:50:45 crc kubenswrapper[4751]: Feb 27 16:50:45 crc kubenswrapper[4751]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Feb 27 16:50:45 crc kubenswrapper[4751]: Feb 27 16:50:45 crc kubenswrapper[4751]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Feb 27 16:50:45 crc kubenswrapper[4751]: Feb 27 16:50:45 crc kubenswrapper[4751]: MYSQL_CMD="mysql -h -u root -P 3306" Feb 27 16:50:45 crc kubenswrapper[4751]: Feb 27 16:50:45 crc kubenswrapper[4751]: if [ -n "glance" ]; then Feb 27 16:50:45 crc kubenswrapper[4751]: GRANT_DATABASE="glance" Feb 27 16:50:45 crc kubenswrapper[4751]: else Feb 27 16:50:45 crc kubenswrapper[4751]: GRANT_DATABASE="*" Feb 27 16:50:45 crc kubenswrapper[4751]: fi Feb 27 16:50:45 crc kubenswrapper[4751]: Feb 27 
16:50:45 crc kubenswrapper[4751]: # going for maximum compatibility here: Feb 27 16:50:45 crc kubenswrapper[4751]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Feb 27 16:50:45 crc kubenswrapper[4751]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Feb 27 16:50:45 crc kubenswrapper[4751]: # 3. create user with CREATE but then do all password and TLS with ALTER to Feb 27 16:50:45 crc kubenswrapper[4751]: # support updates Feb 27 16:50:45 crc kubenswrapper[4751]: Feb 27 16:50:45 crc kubenswrapper[4751]: $MYSQL_CMD < logger="UnhandledError" Feb 27 16:50:45 crc kubenswrapper[4751]: E0227 16:50:45.314205 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"glance-db-secret\\\" not found\"" pod="openstack/glance-543a-account-create-update-mxr7p" podUID="6c4060a4-e264-4a4a-90ea-4a270cc50940" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.327849 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="4d395a15-ded3-4216-a09e-85b0305c2225" containerName="galera" containerID="cri-o://70a38562ca2c9a0c4ec9524467e090e29e28ad4754513ec3db92df66b24fd0e5" gracePeriod=30 Feb 27 16:50:45 crc kubenswrapper[4751]: W0227 16:50:45.327986 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd7fb2588_cb2f_4495_ab6d_4f6aef939caf.slice/crio-ea99d11420fa194c37d5dcbdc2a6ebb761c29bffd17512ae4a56a10d593ba94b WatchSource:0}: Error finding container ea99d11420fa194c37d5dcbdc2a6ebb761c29bffd17512ae4a56a10d593ba94b: Status 404 returned error can't find the container with id ea99d11420fa194c37d5dcbdc2a6ebb761c29bffd17512ae4a56a10d593ba94b Feb 27 16:50:45 crc kubenswrapper[4751]: E0227 16:50:45.343214 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 16:50:45 crc kubenswrapper[4751]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[/bin/sh -c #!/bin/bash Feb 27 16:50:45 crc kubenswrapper[4751]: Feb 27 16:50:45 crc kubenswrapper[4751]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Feb 27 16:50:45 crc kubenswrapper[4751]: Feb 27 16:50:45 crc kubenswrapper[4751]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Feb 27 16:50:45 crc kubenswrapper[4751]: Feb 27 16:50:45 crc kubenswrapper[4751]: MYSQL_CMD="mysql -h -u root -P 3306" Feb 27 16:50:45 crc kubenswrapper[4751]: Feb 27 16:50:45 crc kubenswrapper[4751]: if [ -n "cinder" ]; then Feb 27 16:50:45 crc kubenswrapper[4751]: GRANT_DATABASE="cinder" Feb 27 16:50:45 crc kubenswrapper[4751]: else Feb 27 16:50:45 crc kubenswrapper[4751]: GRANT_DATABASE="*" Feb 27 16:50:45 crc kubenswrapper[4751]: fi Feb 27 16:50:45 crc kubenswrapper[4751]: Feb 27 16:50:45 crc kubenswrapper[4751]: # going for maximum compatibility here: Feb 27 16:50:45 crc kubenswrapper[4751]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Feb 27 16:50:45 crc kubenswrapper[4751]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Feb 27 16:50:45 crc kubenswrapper[4751]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Feb 27 16:50:45 crc kubenswrapper[4751]: # support updates Feb 27 16:50:45 crc kubenswrapper[4751]: Feb 27 16:50:45 crc kubenswrapper[4751]: $MYSQL_CMD < logger="UnhandledError" Feb 27 16:50:45 crc kubenswrapper[4751]: E0227 16:50:45.343558 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 16:50:45 crc kubenswrapper[4751]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[/bin/sh -c #!/bin/bash Feb 27 16:50:45 crc kubenswrapper[4751]: Feb 27 16:50:45 crc kubenswrapper[4751]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Feb 27 16:50:45 crc kubenswrapper[4751]: Feb 27 16:50:45 crc kubenswrapper[4751]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Feb 27 16:50:45 crc kubenswrapper[4751]: Feb 27 16:50:45 crc kubenswrapper[4751]: MYSQL_CMD="mysql -h -u root -P 3306" Feb 27 16:50:45 crc kubenswrapper[4751]: Feb 27 16:50:45 crc kubenswrapper[4751]: if [ -n "neutron" ]; then Feb 27 16:50:45 crc kubenswrapper[4751]: GRANT_DATABASE="neutron" Feb 27 16:50:45 crc kubenswrapper[4751]: else Feb 27 16:50:45 crc kubenswrapper[4751]: GRANT_DATABASE="*" Feb 27 16:50:45 crc kubenswrapper[4751]: fi Feb 27 16:50:45 crc kubenswrapper[4751]: Feb 27 16:50:45 crc kubenswrapper[4751]: # going for maximum compatibility here: Feb 27 16:50:45 crc kubenswrapper[4751]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Feb 27 16:50:45 crc kubenswrapper[4751]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Feb 27 16:50:45 crc kubenswrapper[4751]: # 3. create user with CREATE but then do all password and TLS with ALTER to Feb 27 16:50:45 crc kubenswrapper[4751]: # support updates Feb 27 16:50:45 crc kubenswrapper[4751]: Feb 27 16:50:45 crc kubenswrapper[4751]: $MYSQL_CMD < logger="UnhandledError" Feb 27 16:50:45 crc kubenswrapper[4751]: E0227 16:50:45.344636 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"neutron-db-secret\\\" not found\"" pod="openstack/neutron-fb4c-account-create-update-7hrjf" podUID="d7fb2588-cb2f-4495-ab6d-4f6aef939caf" Feb 27 16:50:45 crc kubenswrapper[4751]: E0227 16:50:45.344685 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"cinder-db-secret\\\" not found\"" pod="openstack/cinder-d758-account-create-update-jpvqv" podUID="b959a608-80f8-43f4-81a4-203b9a27467d" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.363790 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="51a81c6a-6814-412d-b77d-e741f1f74446" containerName="rabbitmq" containerID="cri-o://56a09490ed1f916c96c436deb77a88d652e14cee4afd925713481445d7f435ab" gracePeriod=604800 Feb 27 16:50:45 crc kubenswrapper[4751]: E0227 16:50:45.378825 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 16:50:45 crc kubenswrapper[4751]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[/bin/sh -c #!/bin/bash Feb 27 16:50:45 crc kubenswrapper[4751]: Feb 27 16:50:45 crc kubenswrapper[4751]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Feb 27 
16:50:45 crc kubenswrapper[4751]: Feb 27 16:50:45 crc kubenswrapper[4751]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Feb 27 16:50:45 crc kubenswrapper[4751]: Feb 27 16:50:45 crc kubenswrapper[4751]: MYSQL_CMD="mysql -h -u root -P 3306" Feb 27 16:50:45 crc kubenswrapper[4751]: Feb 27 16:50:45 crc kubenswrapper[4751]: if [ -n "nova_api" ]; then Feb 27 16:50:45 crc kubenswrapper[4751]: GRANT_DATABASE="nova_api" Feb 27 16:50:45 crc kubenswrapper[4751]: else Feb 27 16:50:45 crc kubenswrapper[4751]: GRANT_DATABASE="*" Feb 27 16:50:45 crc kubenswrapper[4751]: fi Feb 27 16:50:45 crc kubenswrapper[4751]: Feb 27 16:50:45 crc kubenswrapper[4751]: # going for maximum compatibility here: Feb 27 16:50:45 crc kubenswrapper[4751]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Feb 27 16:50:45 crc kubenswrapper[4751]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Feb 27 16:50:45 crc kubenswrapper[4751]: # 3. create user with CREATE but then do all password and TLS with ALTER to Feb 27 16:50:45 crc kubenswrapper[4751]: # support updates Feb 27 16:50:45 crc kubenswrapper[4751]: Feb 27 16:50:45 crc kubenswrapper[4751]: $MYSQL_CMD < logger="UnhandledError" Feb 27 16:50:45 crc kubenswrapper[4751]: E0227 16:50:45.384840 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-api-db-secret\\\" not found\"" pod="openstack/nova-api-c1fb-account-create-update-zkj9t" podUID="465988d0-be74-4295-bb31-4265148803e8" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.402052 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Feb 27 16:50:45 crc kubenswrapper[4751]: E0227 16:50:45.443474 4751 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-681fe80f14cb936ac1603940896731427ac9c788d645cc1ee250520ef0030927.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-conmon-681fe80f14cb936ac1603940896731427ac9c788d645cc1ee250520ef0030927.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-8d3c817e96059d70b12ef7286c0552c07aa2499059b8053476c3d15e8305625f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-3781fd0007798258c29970e2eca3675df69d6304df681043d13fc310c53b5b2d.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-d9d8a8e1e05ef6e1c7673a877cb8fbfae2175606b7184f573f2d5f77b2fb1d24.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-conmon-3e77e6f4a377245e2e374c8bf467f0ec059e1247d85be6db7d91b8496e308e18.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ad24b50_556b_4799_a598_b7618c1664fd.slice/crio-conmon-e818dd36bb380e38ca7e8c06d9f356dc7568cbb2837fa401aecb68a64720dc8e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-conmon-8d3c817e96059d70b12ef7286c0552c07aa2499059b8053476c3d15e8305625f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-83ce35274bb0792c55293b6011d40b169da183543e50dad4bd7e201d6f6a7146.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-b93427209a64b6f4800316cc95e813dbfe839f3c1cc330375575973fc2bd09ba.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-conmon-23f3e1c514c8b828bd9ae5bb87e214f223a4588483efcabd156863e7581c145e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-4cafaf593ca2edd1eaf7ce55b2075b944bb67896f93d4fa2ddbe908cbb542c69.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-conmon-3d28978244e9cafd7b1ee3ade5f195d2ae28102706cdb9083ebd620acc9c5453.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ad24b50_556b_4799_a598_b7618c1664fd.slice/crio-e818dd36bb380e38ca7e8c06d9f356dc7568cbb2837fa401aecb68a64720dc8e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-a303cad8ff9d037361da17d1a15f7f3b922522d9c253a7d490bbde1a81132839.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-conmon-4cafaf593ca2edd1eaf7ce55b2075b944bb67896f93d4fa2ddbe908cbb542c69.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27f559b3_2c7d_4567_b836_702db66d74ae.slice/crio-1605ce034c9a31b1ba7385475f66a6cf4c1eed04b5e224929bfc6e00a7735ec1.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod16754588_ca23_484b_b8e8_21bc94c640f3.slice/crio-8cae1a6a519f4d8cb3bd285b3e459c6d60a2234a0f23af55f5a0cf07199403d7.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-conmon-e79d296ac1de0b104dda270fbeebf21a4f77921165bf8426ffb98a3ab9fd68bd.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-3e77e6f4a377245e2e374c8bf467f0ec059e1247d85be6db7d91b8496e308e18.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-conmon-a303cad8ff9d037361da17d1a15f7f3b922522d9c253a7d490bbde1a81132839.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-conmon-3781fd0007798258c29970e2eca3675df69d6304df681043d13fc310c53b5b2d.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf9ca6eb2_820e_49ea_80ca_bd0e352d4243.slice/crio-conmon-66ec49a151bde81e12512fb05eabd11d784e82af2fa19e9c977a0f218bb55c6d.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-conmon-b93427209a64b6f4800316cc95e813dbfe839f3c1cc330375575973fc2bd09ba.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-e79d296ac1de0b104dda270fbeebf21a4f77921165bf8426ffb98a3ab9fd68bd.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-23f3e1c514c8b828bd9ae5bb87e214f223a4588483efcabd156863e7581c145e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-conmon-de66ee2999fac29e22b4821b042f3b6c8bcd8af2215e1895604af96a423ffd6d.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd9f1bc_399b_4282_a2cf_b76526fcfca5.slice/crio-conmon-4ca99524eea4a99a550fc63f10a9bcbfd3c3e4a41fa126a72fb0140d5e9d14f0.scope\": RecentStats: unable to find data in memory cache]" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.472891 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.474688 4751 scope.go:117] "RemoveContainer" containerID="ff7df5badcf4d604fc8a4002a9fdc2e25bffaf7333ce4b9ec101b40ca09bbc07" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.490607 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-nb-0"] Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.508997 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.523682 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-sb-0"] Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.533447 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-4xmph"] Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.540741 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-4xmph"] Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.551842 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-xcsrx"] Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.551888 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-metrics-xcsrx"] Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.564025 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/176ca33f-0a66-4132-bdf1-4be84eba5b34-openstack-config-secret\") pod \"176ca33f-0a66-4132-bdf1-4be84eba5b34\" (UID: \"176ca33f-0a66-4132-bdf1-4be84eba5b34\") " Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.564270 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/176ca33f-0a66-4132-bdf1-4be84eba5b34-openstack-config\") pod \"176ca33f-0a66-4132-bdf1-4be84eba5b34\" (UID: \"176ca33f-0a66-4132-bdf1-4be84eba5b34\") " Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.564327 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8jm8k\" (UniqueName: \"kubernetes.io/projected/176ca33f-0a66-4132-bdf1-4be84eba5b34-kube-api-access-8jm8k\") pod \"176ca33f-0a66-4132-bdf1-4be84eba5b34\" (UID: \"176ca33f-0a66-4132-bdf1-4be84eba5b34\") " Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.564372 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/176ca33f-0a66-4132-bdf1-4be84eba5b34-combined-ca-bundle\") pod \"176ca33f-0a66-4132-bdf1-4be84eba5b34\" (UID: \"176ca33f-0a66-4132-bdf1-4be84eba5b34\") " Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.566874 4751 scope.go:117] "RemoveContainer" containerID="990344b7f81bfa825e7bad025dd1c2594ea1b24962516e0ebe87a7b0ebe6d249" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.582606 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/176ca33f-0a66-4132-bdf1-4be84eba5b34-kube-api-access-8jm8k" (OuterVolumeSpecName: "kube-api-access-8jm8k") pod "176ca33f-0a66-4132-bdf1-4be84eba5b34" (UID: "176ca33f-0a66-4132-bdf1-4be84eba5b34"). InnerVolumeSpecName "kube-api-access-8jm8k". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.605610 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/176ca33f-0a66-4132-bdf1-4be84eba5b34-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "176ca33f-0a66-4132-bdf1-4be84eba5b34" (UID: "176ca33f-0a66-4132-bdf1-4be84eba5b34"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.658207 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/176ca33f-0a66-4132-bdf1-4be84eba5b34-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "176ca33f-0a66-4132-bdf1-4be84eba5b34" (UID: "176ca33f-0a66-4132-bdf1-4be84eba5b34"). InnerVolumeSpecName "openstack-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.666158 4751 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/176ca33f-0a66-4132-bdf1-4be84eba5b34-openstack-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.666196 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8jm8k\" (UniqueName: \"kubernetes.io/projected/176ca33f-0a66-4132-bdf1-4be84eba5b34-kube-api-access-8jm8k\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.666208 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/176ca33f-0a66-4132-bdf1-4be84eba5b34-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.703043 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/176ca33f-0a66-4132-bdf1-4be84eba5b34-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "176ca33f-0a66-4132-bdf1-4be84eba5b34" (UID: "176ca33f-0a66-4132-bdf1-4be84eba5b34"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.770295 4751 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/176ca33f-0a66-4132-bdf1-4be84eba5b34-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:45 crc kubenswrapper[4751]: E0227 16:50:45.770389 4751 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Feb 27 16:50:45 crc kubenswrapper[4751]: E0227 16:50:45.770460 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-config-data podName:cecf602c-dec2-40c6-922c-bf84b707b1b9 nodeName:}" failed. No retries permitted until 2026-02-27 16:50:49.770442456 +0000 UTC m=+1611.917456903 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-config-data") pod "rabbitmq-cell1-server-0" (UID: "cecf602c-dec2-40c6-922c-bf84b707b1b9") : configmap "rabbitmq-cell1-config-data" not found Feb 27 16:50:45 crc kubenswrapper[4751]: I0227 16:50:45.981571 4751 scope.go:117] "RemoveContainer" containerID="6c74828590d469e03165fd6f252422867fd14e665dbe3ddfb7a6c1b2f1561bb7" Feb 27 16:50:46 crc kubenswrapper[4751]: I0227 16:50:46.061796 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-ccb964dc9-cj74q"] Feb 27 16:50:46 crc kubenswrapper[4751]: I0227 16:50:46.062033 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-ccb964dc9-cj74q" podUID="ef465c53-5add-41ff-9fcc-00e714bc2bc0" containerName="proxy-httpd" containerID="cri-o://843802b514320212732f6a6e3503b615909bac4f2d8f4d4458b80f0b1046f521" gracePeriod=30 Feb 27 16:50:46 crc kubenswrapper[4751]: I0227 16:50:46.062397 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-ccb964dc9-cj74q" podUID="ef465c53-5add-41ff-9fcc-00e714bc2bc0" containerName="proxy-server" containerID="cri-o://668b3715f8d8476a3e1d9d7443b5adb7e8ae4b4b6eac2c5be4a3dc6216b3c24f" gracePeriod=30 Feb 27 16:50:46 crc kubenswrapper[4751]: I0227 16:50:46.069606 4751 scope.go:117] "RemoveContainer" containerID="21f622d5f20191c0f88a55bea7b29298d2743cf401c56a29ef7e4407b0fd82c6" Feb 27 16:50:46 crc kubenswrapper[4751]: I0227 16:50:46.117697 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:50:46 crc kubenswrapper[4751]: I0227 16:50:46.127721 4751 scope.go:117] "RemoveContainer" containerID="5f0141511ca3d3aa75b1878aa729a5715a4ce124f70bcdf3e79e44f61c356a32" Feb 27 16:50:46 crc kubenswrapper[4751]: I0227 16:50:46.160458 4751 scope.go:117] "RemoveContainer" containerID="67b68970dcc70c2551fa94757bc51fc1016792d1261833505238d86e1d89cc24" Feb 27 16:50:46 crc kubenswrapper[4751]: I0227 16:50:46.216677 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" Feb 27 16:50:46 crc kubenswrapper[4751]: I0227 16:50:46.298974 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8c5xj\" (UniqueName: \"kubernetes.io/projected/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-kube-api-access-8c5xj\") pod \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\" (UID: \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\") " Feb 27 16:50:46 crc kubenswrapper[4751]: I0227 16:50:46.299093 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-nova-novncproxy-tls-certs\") pod \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\" (UID: \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\") " Feb 27 16:50:46 crc kubenswrapper[4751]: I0227 16:50:46.299179 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-combined-ca-bundle\") pod \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\" (UID: \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\") " Feb 27 16:50:46 crc kubenswrapper[4751]: I0227 16:50:46.299242 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-config-data\") pod \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\" (UID: \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\") " Feb 27 16:50:46 crc kubenswrapper[4751]: I0227 16:50:46.299282 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-vencrypt-tls-certs\") pod \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\" (UID: \"47bf8499-97a6-4f76-8e2e-25b3fbff1d93\") " Feb 27 16:50:46 crc kubenswrapper[4751]: I0227 16:50:46.311716 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-kube-api-access-8c5xj" (OuterVolumeSpecName: "kube-api-access-8c5xj") pod "47bf8499-97a6-4f76-8e2e-25b3fbff1d93" (UID: "47bf8499-97a6-4f76-8e2e-25b3fbff1d93"). InnerVolumeSpecName "kube-api-access-8c5xj". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:46 crc kubenswrapper[4751]: I0227 16:50:46.335828 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-config-data" (OuterVolumeSpecName: "config-data") pod "47bf8499-97a6-4f76-8e2e-25b3fbff1d93" (UID: "47bf8499-97a6-4f76-8e2e-25b3fbff1d93"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:46 crc kubenswrapper[4751]: I0227 16:50:46.341079 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "47bf8499-97a6-4f76-8e2e-25b3fbff1d93" (UID: "47bf8499-97a6-4f76-8e2e-25b3fbff1d93"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:46 crc kubenswrapper[4751]: E0227 16:50:46.350273 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="da16950bef4c8761589f066404058c19ec5943530e74d8711c9cc18e707be9f5" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Feb 27 16:50:46 crc kubenswrapper[4751]: E0227 16:50:46.355804 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="da16950bef4c8761589f066404058c19ec5943530e74d8711c9cc18e707be9f5" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Feb 27 16:50:46 crc kubenswrapper[4751]: I0227 16:50:46.356037 4751 generic.go:334] "Generic (PLEG): container finished" podID="f9ca6eb2-820e-49ea-80ca-bd0e352d4243" containerID="66ec49a151bde81e12512fb05eabd11d784e82af2fa19e9c977a0f218bb55c6d" exitCode=143 Feb 27 16:50:46 crc kubenswrapper[4751]: I0227 16:50:46.356107 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-748c66fdb6-xsx5t" event={"ID":"f9ca6eb2-820e-49ea-80ca-bd0e352d4243","Type":"ContainerDied","Data":"66ec49a151bde81e12512fb05eabd11d784e82af2fa19e9c977a0f218bb55c6d"} Feb 27 16:50:46 crc kubenswrapper[4751]: E0227 16:50:46.364529 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="da16950bef4c8761589f066404058c19ec5943530e74d8711c9cc18e707be9f5" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:46.364714 4751 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="f501d880-21be-44e3-b015-05b79e226279" containerName="nova-cell0-conductor-conductor" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.369103 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d758-account-create-update-jpvqv" event={"ID":"b959a608-80f8-43f4-81a4-203b9a27467d","Type":"ContainerStarted","Data":"ddea37d668ad6ba768204e6980bab21fe72d6ccedfb0ee35e65e653881ad6a26"} Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.390593 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-nova-novncproxy-tls-certs" (OuterVolumeSpecName: "nova-novncproxy-tls-certs") pod "47bf8499-97a6-4f76-8e2e-25b3fbff1d93" (UID: "47bf8499-97a6-4f76-8e2e-25b3fbff1d93"). InnerVolumeSpecName "nova-novncproxy-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.397657 4751 generic.go:334] "Generic (PLEG): container finished" podID="a9f1619e-893b-4f17-b105-214ccbf6385e" containerID="cb50b4038b52526d094348a64713dcefe6038b082abf010e1ec5590d50b6e67e" exitCode=0 Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.397690 4751 generic.go:334] "Generic (PLEG): container finished" podID="a9f1619e-893b-4f17-b105-214ccbf6385e" containerID="b8d92b7b3132116dd0110c81240f49261aa50bec396280f512da572100bafb6d" exitCode=143 Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.397756 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" event={"ID":"a9f1619e-893b-4f17-b105-214ccbf6385e","Type":"ContainerDied","Data":"cb50b4038b52526d094348a64713dcefe6038b082abf010e1ec5590d50b6e67e"} Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.397782 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" event={"ID":"a9f1619e-893b-4f17-b105-214ccbf6385e","Type":"ContainerDied","Data":"b8d92b7b3132116dd0110c81240f49261aa50bec396280f512da572100bafb6d"} Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.397810 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" event={"ID":"a9f1619e-893b-4f17-b105-214ccbf6385e","Type":"ContainerDied","Data":"e9b0e432779ddc335de600234e8d942838cb5862923c3006c0c119578f46d441"} Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.397826 4751 scope.go:117] "RemoveContainer" containerID="cb50b4038b52526d094348a64713dcefe6038b082abf010e1ec5590d50b6e67e" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.398046 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-55c754cd9d-n8xn9" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.400518 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9f1619e-893b-4f17-b105-214ccbf6385e-logs\") pod \"a9f1619e-893b-4f17-b105-214ccbf6385e\" (UID: \"a9f1619e-893b-4f17-b105-214ccbf6385e\") " Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.400567 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9f1619e-893b-4f17-b105-214ccbf6385e-config-data\") pod \"a9f1619e-893b-4f17-b105-214ccbf6385e\" (UID: \"a9f1619e-893b-4f17-b105-214ccbf6385e\") " Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.400595 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9f1619e-893b-4f17-b105-214ccbf6385e-combined-ca-bundle\") pod \"a9f1619e-893b-4f17-b105-214ccbf6385e\" (UID: \"a9f1619e-893b-4f17-b105-214ccbf6385e\") " Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.400698 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a9f1619e-893b-4f17-b105-214ccbf6385e-config-data-custom\") pod \"a9f1619e-893b-4f17-b105-214ccbf6385e\" (UID: \"a9f1619e-893b-4f17-b105-214ccbf6385e\") " Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.400726 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cn582\" (UniqueName: \"kubernetes.io/projected/a9f1619e-893b-4f17-b105-214ccbf6385e-kube-api-access-cn582\") pod \"a9f1619e-893b-4f17-b105-214ccbf6385e\" (UID: \"a9f1619e-893b-4f17-b105-214ccbf6385e\") " Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.400969 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-vencrypt-tls-certs" (OuterVolumeSpecName: "vencrypt-tls-certs") pod "47bf8499-97a6-4f76-8e2e-25b3fbff1d93" (UID: "47bf8499-97a6-4f76-8e2e-25b3fbff1d93"). InnerVolumeSpecName "vencrypt-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.401443 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.401456 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.401464 4751 reconciler_common.go:293] "Volume detached for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-vencrypt-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.401472 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8c5xj\" (UniqueName: \"kubernetes.io/projected/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-kube-api-access-8c5xj\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.401483 4751 reconciler_common.go:293] "Volume detached for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/47bf8499-97a6-4f76-8e2e-25b3fbff1d93-nova-novncproxy-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.401819 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a9f1619e-893b-4f17-b105-214ccbf6385e-logs" (OuterVolumeSpecName: "logs") pod "a9f1619e-893b-4f17-b105-214ccbf6385e" (UID: "a9f1619e-893b-4f17-b105-214ccbf6385e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.404460 4751 generic.go:334] "Generic (PLEG): container finished" podID="4d395a15-ded3-4216-a09e-85b0305c2225" containerID="70a38562ca2c9a0c4ec9524467e090e29e28ad4754513ec3db92df66b24fd0e5" exitCode=0 Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.404545 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"4d395a15-ded3-4216-a09e-85b0305c2225","Type":"ContainerDied","Data":"70a38562ca2c9a0c4ec9524467e090e29e28ad4754513ec3db92df66b24fd0e5"} Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.409149 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9f1619e-893b-4f17-b105-214ccbf6385e-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a9f1619e-893b-4f17-b105-214ccbf6385e" (UID: "a9f1619e-893b-4f17-b105-214ccbf6385e"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.412078 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9f1619e-893b-4f17-b105-214ccbf6385e-kube-api-access-cn582" (OuterVolumeSpecName: "kube-api-access-cn582") pod "a9f1619e-893b-4f17-b105-214ccbf6385e" (UID: "a9f1619e-893b-4f17-b105-214ccbf6385e"). InnerVolumeSpecName "kube-api-access-cn582". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.430016 4751 generic.go:334] "Generic (PLEG): container finished" podID="ef465c53-5add-41ff-9fcc-00e714bc2bc0" containerID="843802b514320212732f6a6e3503b615909bac4f2d8f4d4458b80f0b1046f521" exitCode=0 Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.430130 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-ccb964dc9-cj74q" event={"ID":"ef465c53-5add-41ff-9fcc-00e714bc2bc0","Type":"ContainerDied","Data":"843802b514320212732f6a6e3503b615909bac4f2d8f4d4458b80f0b1046f521"} Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.439855 4751 scope.go:117] "RemoveContainer" containerID="b8d92b7b3132116dd0110c81240f49261aa50bec396280f512da572100bafb6d" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.452298 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9f1619e-893b-4f17-b105-214ccbf6385e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a9f1619e-893b-4f17-b105-214ccbf6385e" (UID: "a9f1619e-893b-4f17-b105-214ccbf6385e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.454657 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9f1619e-893b-4f17-b105-214ccbf6385e-config-data" (OuterVolumeSpecName: "config-data") pod "a9f1619e-893b-4f17-b105-214ccbf6385e" (UID: "a9f1619e-893b-4f17-b105-214ccbf6385e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.458019 4751 generic.go:334] "Generic (PLEG): container finished" podID="47bf8499-97a6-4f76-8e2e-25b3fbff1d93" containerID="916390746d00d1b65c9ea5594a62aa426093bc9d7520f7d5c66283ffee926d58" exitCode=0 Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.458070 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"47bf8499-97a6-4f76-8e2e-25b3fbff1d93","Type":"ContainerDied","Data":"916390746d00d1b65c9ea5594a62aa426093bc9d7520f7d5c66283ffee926d58"} Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.458171 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"47bf8499-97a6-4f76-8e2e-25b3fbff1d93","Type":"ContainerDied","Data":"1484e719e243a2cfef5fb369afed1455dc208194c592d425ba92642bdf7893db"} Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.458324 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.469565 4751 generic.go:334] "Generic (PLEG): container finished" podID="1a8f14c4-f8bc-4247-b2a2-72aa4801adfa" containerID="7093fc3fe4d41f6bca93c56cdb9de8d375834320491b81f1d7f637c564ea6641" exitCode=143 Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.469616 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa","Type":"ContainerDied","Data":"7093fc3fe4d41f6bca93c56cdb9de8d375834320491b81f1d7f637c564ea6641"} Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.470835 4751 generic.go:334] "Generic (PLEG): container finished" podID="7dc9beed-8444-4389-8859-234af0090157" containerID="1146c08b2b84666378e79f91a1dd4237f5999be9cb0cf5e945a9e24d393f37db" exitCode=1 Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.470862 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-hvcjf" event={"ID":"7dc9beed-8444-4389-8859-234af0090157","Type":"ContainerDied","Data":"1146c08b2b84666378e79f91a1dd4237f5999be9cb0cf5e945a9e24d393f37db"} Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.475042 4751 scope.go:117] "RemoveContainer" containerID="1146c08b2b84666378e79f91a1dd4237f5999be9cb0cf5e945a9e24d393f37db" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.475522 4751 scope.go:117] "RemoveContainer" containerID="cb50b4038b52526d094348a64713dcefe6038b082abf010e1ec5590d50b6e67e" Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:46.476461 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb50b4038b52526d094348a64713dcefe6038b082abf010e1ec5590d50b6e67e\": container with ID starting with cb50b4038b52526d094348a64713dcefe6038b082abf010e1ec5590d50b6e67e not found: ID does not exist" containerID="cb50b4038b52526d094348a64713dcefe6038b082abf010e1ec5590d50b6e67e" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.476488 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb50b4038b52526d094348a64713dcefe6038b082abf010e1ec5590d50b6e67e"} err="failed to get container status \"cb50b4038b52526d094348a64713dcefe6038b082abf010e1ec5590d50b6e67e\": rpc error: code = NotFound desc = could not find container \"cb50b4038b52526d094348a64713dcefe6038b082abf010e1ec5590d50b6e67e\": container with ID starting with cb50b4038b52526d094348a64713dcefe6038b082abf010e1ec5590d50b6e67e not found: ID does not exist" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.476532 4751 scope.go:117] "RemoveContainer" containerID="b8d92b7b3132116dd0110c81240f49261aa50bec396280f512da572100bafb6d" Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:46.479074 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b8d92b7b3132116dd0110c81240f49261aa50bec396280f512da572100bafb6d\": container with ID starting with b8d92b7b3132116dd0110c81240f49261aa50bec396280f512da572100bafb6d not found: ID does not exist" containerID="b8d92b7b3132116dd0110c81240f49261aa50bec396280f512da572100bafb6d" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.479096 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8d92b7b3132116dd0110c81240f49261aa50bec396280f512da572100bafb6d"} err="failed to get container status 
\"b8d92b7b3132116dd0110c81240f49261aa50bec396280f512da572100bafb6d\": rpc error: code = NotFound desc = could not find container \"b8d92b7b3132116dd0110c81240f49261aa50bec396280f512da572100bafb6d\": container with ID starting with b8d92b7b3132116dd0110c81240f49261aa50bec396280f512da572100bafb6d not found: ID does not exist" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.479110 4751 scope.go:117] "RemoveContainer" containerID="cb50b4038b52526d094348a64713dcefe6038b082abf010e1ec5590d50b6e67e" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.479818 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb50b4038b52526d094348a64713dcefe6038b082abf010e1ec5590d50b6e67e"} err="failed to get container status \"cb50b4038b52526d094348a64713dcefe6038b082abf010e1ec5590d50b6e67e\": rpc error: code = NotFound desc = could not find container \"cb50b4038b52526d094348a64713dcefe6038b082abf010e1ec5590d50b6e67e\": container with ID starting with cb50b4038b52526d094348a64713dcefe6038b082abf010e1ec5590d50b6e67e not found: ID does not exist" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.479839 4751 scope.go:117] "RemoveContainer" containerID="b8d92b7b3132116dd0110c81240f49261aa50bec396280f512da572100bafb6d" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.480148 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c1fb-account-create-update-zkj9t" event={"ID":"465988d0-be74-4295-bb31-4265148803e8","Type":"ContainerStarted","Data":"60d364d2a8d1d18c70f72c4a5dd1e1482e45b16d6647e104afcb27083233f8b2"} Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.480152 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8d92b7b3132116dd0110c81240f49261aa50bec396280f512da572100bafb6d"} err="failed to get container status \"b8d92b7b3132116dd0110c81240f49261aa50bec396280f512da572100bafb6d\": rpc error: code = NotFound desc = could not find container \"b8d92b7b3132116dd0110c81240f49261aa50bec396280f512da572100bafb6d\": container with ID starting with b8d92b7b3132116dd0110c81240f49261aa50bec396280f512da572100bafb6d not found: ID does not exist" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.480193 4751 scope.go:117] "RemoveContainer" containerID="916390746d00d1b65c9ea5594a62aa426093bc9d7520f7d5c66283ffee926d58" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.502657 4751 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a9f1619e-893b-4f17-b105-214ccbf6385e-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.502676 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cn582\" (UniqueName: \"kubernetes.io/projected/a9f1619e-893b-4f17-b105-214ccbf6385e-kube-api-access-cn582\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.502685 4751 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9f1619e-893b-4f17-b105-214ccbf6385e-logs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.502694 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9f1619e-893b-4f17-b105-214ccbf6385e-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.502702 4751 reconciler_common.go:293] "Volume detached for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9f1619e-893b-4f17-b105-214ccbf6385e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.507797 4751 generic.go:334] "Generic (PLEG): container finished" podID="23d98e0b-8d21-4ad9-b3a4-716c1d221949" containerID="2ecbec27a7197208f58327c1b614eb58bf364a81b50228cd0b0b7068505b1049" exitCode=0 Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.507845 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"23d98e0b-8d21-4ad9-b3a4-716c1d221949","Type":"ContainerDied","Data":"2ecbec27a7197208f58327c1b614eb58bf364a81b50228cd0b0b7068505b1049"} Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.566703 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.667013 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="176ca33f-0a66-4132-bdf1-4be84eba5b34" path="/var/lib/kubelet/pods/176ca33f-0a66-4132-bdf1-4be84eba5b34/volumes" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.667731 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25f3f7ef-8fae-4ae3-812c-27d2fb474723" path="/var/lib/kubelet/pods/25f3f7ef-8fae-4ae3-812c-27d2fb474723/volumes" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.669671 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2fa0f5e2-5ce2-49ca-8873-4ef71aac0241" path="/var/lib/kubelet/pods/2fa0f5e2-5ce2-49ca-8873-4ef71aac0241/volumes" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.670978 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="438372fd-dcc8-47e3-a547-c8a1729b2f1f" path="/var/lib/kubelet/pods/438372fd-dcc8-47e3-a547-c8a1729b2f1f/volumes" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.677333 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d0af769-7ac5-4a47-b229-5b456f60d406" path="/var/lib/kubelet/pods/6d0af769-7ac5-4a47-b229-5b456f60d406/volumes" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.680379 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8548f461-a34e-4c42-9d42-ee0a8b0bb7c7" path="/var/lib/kubelet/pods/8548f461-a34e-4c42-9d42-ee0a8b0bb7c7/volumes" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.685696 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="95b4a6cb-a957-4a31-8510-292eb1305ad6" path="/var/lib/kubelet/pods/95b4a6cb-a957-4a31-8510-292eb1305ad6/volumes" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.686314 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b05cb31e-70e5-4e64-984a-6fa8053743de" path="/var/lib/kubelet/pods/b05cb31e-70e5-4e64-984a-6fa8053743de/volumes" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.686916 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c696c27b-af62-4855-8694-1e541307c4f5" path="/var/lib/kubelet/pods/c696c27b-af62-4855-8694-1e541307c4f5/volumes" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.687979 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d074bd48-85b6-4bcf-ad23-bb541f92984d" path="/var/lib/kubelet/pods/d074bd48-85b6-4bcf-ad23-bb541f92984d/volumes" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.688501 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="da8c688d-4446-4f25-853d-0f694094d0af" path="/var/lib/kubelet/pods/da8c688d-4446-4f25-853d-0f694094d0af/volumes" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.689622 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef097fe8-b372-4175-a5be-15fbb62905c9" path="/var/lib/kubelet/pods/ef097fe8-b372-4175-a5be-15fbb62905c9/volumes" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.690600 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-fb4c-account-create-update-7hrjf" event={"ID":"d7fb2588-cb2f-4495-ab6d-4f6aef939caf","Type":"ContainerStarted","Data":"ea99d11420fa194c37d5dcbdc2a6ebb761c29bffd17512ae4a56a10d593ba94b"} Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.695473 4751 scope.go:117] "RemoveContainer" containerID="916390746d00d1b65c9ea5594a62aa426093bc9d7520f7d5c66283ffee926d58" Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:46.696550 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"916390746d00d1b65c9ea5594a62aa426093bc9d7520f7d5c66283ffee926d58\": container with ID starting with 916390746d00d1b65c9ea5594a62aa426093bc9d7520f7d5c66283ffee926d58 not found: ID does not exist" containerID="916390746d00d1b65c9ea5594a62aa426093bc9d7520f7d5c66283ffee926d58" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.696583 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"916390746d00d1b65c9ea5594a62aa426093bc9d7520f7d5c66283ffee926d58"} err="failed to get container status \"916390746d00d1b65c9ea5594a62aa426093bc9d7520f7d5c66283ffee926d58\": rpc error: code = NotFound desc = could not find container \"916390746d00d1b65c9ea5594a62aa426093bc9d7520f7d5c66283ffee926d58\": container with ID starting with 916390746d00d1b65c9ea5594a62aa426093bc9d7520f7d5c66283ffee926d58 not found: ID does not exist" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.706664 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.720076 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.877213 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-ccb964dc9-cj74q" podUID="ef465c53-5add-41ff-9fcc-00e714bc2bc0" containerName="proxy-server" probeResult="failure" output="Get \"https://10.217.0.174:8080/healthcheck\": dial tcp 10.217.0.174:8080: connect: connection refused" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.879594 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-ccb964dc9-cj74q" podUID="ef465c53-5add-41ff-9fcc-00e714bc2bc0" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.174:8080/healthcheck\": dial tcp 10.217.0.174:8080: connect: connection refused" Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:46.911334 4751 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:46.911421 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-config-data podName:51a81c6a-6814-412d-b77d-e741f1f74446 nodeName:}" failed. 
No retries permitted until 2026-02-27 16:50:50.911380427 +0000 UTC m=+1613.058394874 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-config-data") pod "rabbitmq-server-0" (UID: "51a81c6a-6814-412d-b77d-e741f1f74446") : configmap "rabbitmq-config-data" not found Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:46.994435 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.011037 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-55c754cd9d-n8xn9"] Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.019097 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-55c754cd9d-n8xn9"] Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.113161 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4d395a15-ded3-4216-a09e-85b0305c2225-config-data-default\") pod \"4d395a15-ded3-4216-a09e-85b0305c2225\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.113209 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d395a15-ded3-4216-a09e-85b0305c2225-galera-tls-certs\") pod \"4d395a15-ded3-4216-a09e-85b0305c2225\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.113332 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d395a15-ded3-4216-a09e-85b0305c2225-combined-ca-bundle\") pod \"4d395a15-ded3-4216-a09e-85b0305c2225\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.113383 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4d395a15-ded3-4216-a09e-85b0305c2225-config-data-generated\") pod \"4d395a15-ded3-4216-a09e-85b0305c2225\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.113460 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4d395a15-ded3-4216-a09e-85b0305c2225-kolla-config\") pod \"4d395a15-ded3-4216-a09e-85b0305c2225\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.113509 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fjtj8\" (UniqueName: \"kubernetes.io/projected/4d395a15-ded3-4216-a09e-85b0305c2225-kube-api-access-fjtj8\") pod \"4d395a15-ded3-4216-a09e-85b0305c2225\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.113533 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d395a15-ded3-4216-a09e-85b0305c2225-operator-scripts\") pod \"4d395a15-ded3-4216-a09e-85b0305c2225\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.113571 4751 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"4d395a15-ded3-4216-a09e-85b0305c2225\" (UID: \"4d395a15-ded3-4216-a09e-85b0305c2225\") " Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.117246 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d395a15-ded3-4216-a09e-85b0305c2225-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "4d395a15-ded3-4216-a09e-85b0305c2225" (UID: "4d395a15-ded3-4216-a09e-85b0305c2225"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.118954 4751 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4d395a15-ded3-4216-a09e-85b0305c2225-config-data-default\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.119850 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d395a15-ded3-4216-a09e-85b0305c2225-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "4d395a15-ded3-4216-a09e-85b0305c2225" (UID: "4d395a15-ded3-4216-a09e-85b0305c2225"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.120340 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d395a15-ded3-4216-a09e-85b0305c2225-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "4d395a15-ded3-4216-a09e-85b0305c2225" (UID: "4d395a15-ded3-4216-a09e-85b0305c2225"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.121384 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d395a15-ded3-4216-a09e-85b0305c2225-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4d395a15-ded3-4216-a09e-85b0305c2225" (UID: "4d395a15-ded3-4216-a09e-85b0305c2225"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.121481 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-gdjfm" podUID="3f29e0f7-8556-4570-a115-1d1ee089479c" containerName="ovn-controller" probeResult="failure" output=< Feb 27 16:50:47 crc kubenswrapper[4751]: ERROR - Failed to get connection status from ovn-controller, ovn-appctl exit status: 0 Feb 27 16:50:47 crc kubenswrapper[4751]: > Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.131033 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d395a15-ded3-4216-a09e-85b0305c2225-kube-api-access-fjtj8" (OuterVolumeSpecName: "kube-api-access-fjtj8") pod "4d395a15-ded3-4216-a09e-85b0305c2225" (UID: "4d395a15-ded3-4216-a09e-85b0305c2225"). InnerVolumeSpecName "kube-api-access-fjtj8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:47.135153 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" containerID="f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.137701 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "mysql-db") pod "4d395a15-ded3-4216-a09e-85b0305c2225" (UID: "4d395a15-ded3-4216-a09e-85b0305c2225"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:47.141434 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" containerID="f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:47.141692 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:47.144627 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" containerID="f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:47.144715 4751 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-frvvc" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerName="ovsdb-server" Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:47.146213 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:47.154846 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] 
Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:47.154943 4751 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-frvvc" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerName="ovs-vswitchd" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.167972 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d395a15-ded3-4216-a09e-85b0305c2225-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4d395a15-ded3-4216-a09e-85b0305c2225" (UID: "4d395a15-ded3-4216-a09e-85b0305c2225"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.205889 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d395a15-ded3-4216-a09e-85b0305c2225-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "4d395a15-ded3-4216-a09e-85b0305c2225" (UID: "4d395a15-ded3-4216-a09e-85b0305c2225"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.220726 4751 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d395a15-ded3-4216-a09e-85b0305c2225-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.220752 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d395a15-ded3-4216-a09e-85b0305c2225-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.220762 4751 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4d395a15-ded3-4216-a09e-85b0305c2225-config-data-generated\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.220770 4751 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4d395a15-ded3-4216-a09e-85b0305c2225-kolla-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.220780 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fjtj8\" (UniqueName: \"kubernetes.io/projected/4d395a15-ded3-4216-a09e-85b0305c2225-kube-api-access-fjtj8\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.220787 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d395a15-ded3-4216-a09e-85b0305c2225-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.220816 4751 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.244209 4751 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.322707 4751 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.476330 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.476652 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b4785321-8f3e-44cb-833c-0b78bc368cd9" containerName="ceilometer-central-agent" containerID="cri-o://5ea526479c46d824bbf94a208fd6d3670757ee20a011265cfd59b145eb86cf1e" gracePeriod=30 Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.476902 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b4785321-8f3e-44cb-833c-0b78bc368cd9" containerName="proxy-httpd" containerID="cri-o://c4a7175c059cf3518ae6eba6d361fbebbc8c52020d2692d9f04fb59309e9cac4" gracePeriod=30 Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.477106 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b4785321-8f3e-44cb-833c-0b78bc368cd9" containerName="sg-core" containerID="cri-o://62ed64131f674628306788d24cfc85250f4581f979fadacc28ddf528b64bebfd" gracePeriod=30 Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.477136 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b4785321-8f3e-44cb-833c-0b78bc368cd9" containerName="ceilometer-notification-agent" containerID="cri-o://a9ee4f7f4be2929eae47c5cf12c06d5e1590f223ebab7558c7fbef22ddb4ca6f" gracePeriod=30 Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.536000 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.536339 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa" containerName="kube-state-metrics" containerID="cri-o://4e7234aa1cb6ed2ff691d6f7cb4d5496e2a8b1a43a07d04951122d27b3a31bab" gracePeriod=30 Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.614320 4751 generic.go:334] "Generic (PLEG): container finished" podID="7dc9beed-8444-4389-8859-234af0090157" containerID="47acd381e9eb85a84aea0efaae97b586799511423dd5af51c19474800fbeefeb" exitCode=1 Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.614769 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-hvcjf" event={"ID":"7dc9beed-8444-4389-8859-234af0090157","Type":"ContainerDied","Data":"47acd381e9eb85a84aea0efaae97b586799511423dd5af51c19474800fbeefeb"} Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.614807 4751 scope.go:117] "RemoveContainer" containerID="1146c08b2b84666378e79f91a1dd4237f5999be9cb0cf5e945a9e24d393f37db" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.615614 4751 scope.go:117] "RemoveContainer" containerID="47acd381e9eb85a84aea0efaae97b586799511423dd5af51c19474800fbeefeb" Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:47.615945 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CrashLoopBackOff: \"back-off 10s restarting failed container=mariadb-account-create-update pod=root-account-create-update-hvcjf_openstack(7dc9beed-8444-4389-8859-234af0090157)\"" pod="openstack/root-account-create-update-hvcjf" podUID="7dc9beed-8444-4389-8859-234af0090157" Feb 27 
16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.645902 4751 generic.go:334] "Generic (PLEG): container finished" podID="3c5e58eb-31a4-4253-8cb9-a9486bb2d955" containerID="d68a8fb32c3c122cd258ca89b0d0d8f27592db1bade310c58767879538bba0eb" exitCode=0 Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.646042 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5c5d5b6fdd-9d8xv" event={"ID":"3c5e58eb-31a4-4253-8cb9-a9486bb2d955","Type":"ContainerDied","Data":"d68a8fb32c3c122cd258ca89b0d0d8f27592db1bade310c58767879538bba0eb"} Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.667941 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"4d395a15-ded3-4216-a09e-85b0305c2225","Type":"ContainerDied","Data":"a4b4d5b0b6bb6127fab72bdb2c190cb9dffe1db9ce9611e03e378c561e013581"} Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.668023 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.738925 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.739125 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/memcached-0" podUID="3c3834ac-6796-485b-9dec-e45cebf976df" containerName="memcached" containerID="cri-o://c70cb3e82423521f3790af75416c702817be4dee431d0d08dcd2396683feb66d" gracePeriod=30 Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.778678 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-0a1f-account-create-update-xg72f"] Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.781960 4751 generic.go:334] "Generic (PLEG): container finished" podID="ef465c53-5add-41ff-9fcc-00e714bc2bc0" containerID="668b3715f8d8476a3e1d9d7443b5adb7e8ae4b4b6eac2c5be4a3dc6216b3c24f" exitCode=0 Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.782029 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-ccb964dc9-cj74q" event={"ID":"ef465c53-5add-41ff-9fcc-00e714bc2bc0","Type":"ContainerDied","Data":"668b3715f8d8476a3e1d9d7443b5adb7e8ae4b4b6eac2c5be4a3dc6216b3c24f"} Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.785865 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-0a1f-account-create-update-xg72f"] Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.806702 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-xxmvj"] Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.808148 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-x847w"] Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.836347 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-xxmvj"] Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.841801 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="27f559b3-2c7d-4567-b836-702db66d74ae" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.172:8776/healthcheck\": read tcp 10.217.0.2:55868->10.217.0.172:8776: read: connection reset by peer" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.854267 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-0a1f-account-create-update-f6ks2"] Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:47.857115 
4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="438372fd-dcc8-47e3-a547-c8a1729b2f1f" containerName="ovsdbserver-sb" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.857137 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="438372fd-dcc8-47e3-a547-c8a1729b2f1f" containerName="ovsdbserver-sb" Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:47.857154 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47bf8499-97a6-4f76-8e2e-25b3fbff1d93" containerName="nova-cell1-novncproxy-novncproxy" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.857161 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="47bf8499-97a6-4f76-8e2e-25b3fbff1d93" containerName="nova-cell1-novncproxy-novncproxy" Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:47.857173 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef097fe8-b372-4175-a5be-15fbb62905c9" containerName="dnsmasq-dns" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.857178 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef097fe8-b372-4175-a5be-15fbb62905c9" containerName="dnsmasq-dns" Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:47.857186 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef097fe8-b372-4175-a5be-15fbb62905c9" containerName="init" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.857191 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef097fe8-b372-4175-a5be-15fbb62905c9" containerName="init" Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:47.857205 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d395a15-ded3-4216-a09e-85b0305c2225" containerName="mysql-bootstrap" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.857210 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d395a15-ded3-4216-a09e-85b0305c2225" containerName="mysql-bootstrap" Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:47.857220 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95b4a6cb-a957-4a31-8510-292eb1305ad6" containerName="openstack-network-exporter" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.857226 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="95b4a6cb-a957-4a31-8510-292eb1305ad6" containerName="openstack-network-exporter" Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:47.857236 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95b4a6cb-a957-4a31-8510-292eb1305ad6" containerName="ovsdbserver-nb" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.857242 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="95b4a6cb-a957-4a31-8510-292eb1305ad6" containerName="ovsdbserver-nb" Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:47.857257 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d395a15-ded3-4216-a09e-85b0305c2225" containerName="galera" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.857262 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d395a15-ded3-4216-a09e-85b0305c2225" containerName="galera" Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:47.857274 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da8c688d-4446-4f25-853d-0f694094d0af" containerName="openstack-network-exporter" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.857280 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="da8c688d-4446-4f25-853d-0f694094d0af" containerName="openstack-network-exporter" Feb 27 16:50:47 
crc kubenswrapper[4751]: E0227 16:50:47.857290 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9f1619e-893b-4f17-b105-214ccbf6385e" containerName="barbican-keystone-listener" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.857297 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9f1619e-893b-4f17-b105-214ccbf6385e" containerName="barbican-keystone-listener" Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:47.857308 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9f1619e-893b-4f17-b105-214ccbf6385e" containerName="barbican-keystone-listener-log" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.857314 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9f1619e-893b-4f17-b105-214ccbf6385e" containerName="barbican-keystone-listener-log" Feb 27 16:50:47 crc kubenswrapper[4751]: E0227 16:50:47.857329 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="438372fd-dcc8-47e3-a547-c8a1729b2f1f" containerName="openstack-network-exporter" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.857334 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="438372fd-dcc8-47e3-a547-c8a1729b2f1f" containerName="openstack-network-exporter" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.857519 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="da8c688d-4446-4f25-853d-0f694094d0af" containerName="openstack-network-exporter" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.857529 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d395a15-ded3-4216-a09e-85b0305c2225" containerName="galera" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.857536 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9f1619e-893b-4f17-b105-214ccbf6385e" containerName="barbican-keystone-listener" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.857551 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="95b4a6cb-a957-4a31-8510-292eb1305ad6" containerName="ovsdbserver-nb" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.857561 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9f1619e-893b-4f17-b105-214ccbf6385e" containerName="barbican-keystone-listener-log" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.857572 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="438372fd-dcc8-47e3-a547-c8a1729b2f1f" containerName="ovsdbserver-sb" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.857582 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="47bf8499-97a6-4f76-8e2e-25b3fbff1d93" containerName="nova-cell1-novncproxy-novncproxy" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.857589 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="95b4a6cb-a957-4a31-8510-292eb1305ad6" containerName="openstack-network-exporter" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.857597 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="438372fd-dcc8-47e3-a547-c8a1729b2f1f" containerName="openstack-network-exporter" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.857609 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef097fe8-b372-4175-a5be-15fbb62905c9" containerName="dnsmasq-dns" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.858116 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-0a1f-account-create-update-f6ks2" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.861593 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.863726 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-6db7c8cdbf-x9xf8"] Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.864044 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/keystone-6db7c8cdbf-x9xf8" podUID="0cf1e239-243c-4f96-abb6-c3fb850e98e1" containerName="keystone-api" containerID="cri-o://432cd197f9cae788ffeb84426732d6a096e82727553ed5b6ea7143ce3bd3614c" gracePeriod=30 Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.875862 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-x847w"] Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.896747 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="68bcf8b3-a271-47f0-9815-17cd3fdaec3e" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.214:8775/\": read tcp 10.217.0.2:33844->10.217.0.214:8775: read: connection reset by peer" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.897023 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="68bcf8b3-a271-47f0-9815-17cd3fdaec3e" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.214:8775/\": read tcp 10.217.0.2:33856->10.217.0.214:8775: read: connection reset by peer" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.897288 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-0a1f-account-create-update-f6ks2"] Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.907992 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.925691 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-0a1f-account-create-update-f6ks2"] Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.942592 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dg54\" (UniqueName: \"kubernetes.io/projected/ddea1ab1-d19b-42f5-833a-6578608d54cc-kube-api-access-7dg54\") pod \"keystone-0a1f-account-create-update-f6ks2\" (UID: \"ddea1ab1-d19b-42f5-833a-6578608d54cc\") " pod="openstack/keystone-0a1f-account-create-update-f6ks2" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.942762 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ddea1ab1-d19b-42f5-833a-6578608d54cc-operator-scripts\") pod \"keystone-0a1f-account-create-update-f6ks2\" (UID: \"ddea1ab1-d19b-42f5-833a-6578608d54cc\") " pod="openstack/keystone-0a1f-account-create-update-f6ks2" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.957428 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-6gnd6"] Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.963861 4751 scope.go:117] "RemoveContainer" containerID="70a38562ca2c9a0c4ec9524467e090e29e28ad4754513ec3db92df66b24fd0e5" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.978525 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-d758-account-create-update-jpvqv" Feb 27 16:50:47 crc kubenswrapper[4751]: I0227 16:50:47.981148 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-6gnd6"] Feb 27 16:50:48 crc kubenswrapper[4751]: E0227 16:50:48.036908 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-7dg54 operator-scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/keystone-0a1f-account-create-update-f6ks2" podUID="ddea1ab1-d19b-42f5-833a-6578608d54cc" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.043653 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ddea1ab1-d19b-42f5-833a-6578608d54cc-operator-scripts\") pod \"keystone-0a1f-account-create-update-f6ks2\" (UID: \"ddea1ab1-d19b-42f5-833a-6578608d54cc\") " pod="openstack/keystone-0a1f-account-create-update-f6ks2" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.043843 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dg54\" (UniqueName: \"kubernetes.io/projected/ddea1ab1-d19b-42f5-833a-6578608d54cc-kube-api-access-7dg54\") pod \"keystone-0a1f-account-create-update-f6ks2\" (UID: \"ddea1ab1-d19b-42f5-833a-6578608d54cc\") " pod="openstack/keystone-0a1f-account-create-update-f6ks2" Feb 27 16:50:48 crc kubenswrapper[4751]: E0227 16:50:48.043786 4751 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Feb 27 16:50:48 crc kubenswrapper[4751]: E0227 16:50:48.044342 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ddea1ab1-d19b-42f5-833a-6578608d54cc-operator-scripts podName:ddea1ab1-d19b-42f5-833a-6578608d54cc nodeName:}" failed. No retries permitted until 2026-02-27 16:50:48.544328614 +0000 UTC m=+1610.691343051 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/ddea1ab1-d19b-42f5-833a-6578608d54cc-operator-scripts") pod "keystone-0a1f-account-create-update-f6ks2" (UID: "ddea1ab1-d19b-42f5-833a-6578608d54cc") : configmap "openstack-scripts" not found Feb 27 16:50:48 crc kubenswrapper[4751]: E0227 16:50:48.047947 4751 projected.go:194] Error preparing data for projected volume kube-api-access-7dg54 for pod openstack/keystone-0a1f-account-create-update-f6ks2: failed to fetch token: serviceaccounts "galera-openstack" not found Feb 27 16:50:48 crc kubenswrapper[4751]: E0227 16:50:48.048110 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ddea1ab1-d19b-42f5-833a-6578608d54cc-kube-api-access-7dg54 podName:ddea1ab1-d19b-42f5-833a-6578608d54cc nodeName:}" failed. No retries permitted until 2026-02-27 16:50:48.548096034 +0000 UTC m=+1610.695110491 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-7dg54" (UniqueName: "kubernetes.io/projected/ddea1ab1-d19b-42f5-833a-6578608d54cc-kube-api-access-7dg54") pod "keystone-0a1f-account-create-update-f6ks2" (UID: "ddea1ab1-d19b-42f5-833a-6578608d54cc") : failed to fetch token: serviceaccounts "galera-openstack" not found Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.056984 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-hvcjf"] Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.073458 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.080729 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-cell1-galera-0"] Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.144922 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6c58n\" (UniqueName: \"kubernetes.io/projected/b959a608-80f8-43f4-81a4-203b9a27467d-kube-api-access-6c58n\") pod \"b959a608-80f8-43f4-81a4-203b9a27467d\" (UID: \"b959a608-80f8-43f4-81a4-203b9a27467d\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.145081 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b959a608-80f8-43f4-81a4-203b9a27467d-operator-scripts\") pod \"b959a608-80f8-43f4-81a4-203b9a27467d\" (UID: \"b959a608-80f8-43f4-81a4-203b9a27467d\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.146603 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b959a608-80f8-43f4-81a4-203b9a27467d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b959a608-80f8-43f4-81a4-203b9a27467d" (UID: "b959a608-80f8-43f4-81a4-203b9a27467d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.154458 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b959a608-80f8-43f4-81a4-203b9a27467d-kube-api-access-6c58n" (OuterVolumeSpecName: "kube-api-access-6c58n") pod "b959a608-80f8-43f4-81a4-203b9a27467d" (UID: "b959a608-80f8-43f4-81a4-203b9a27467d"). InnerVolumeSpecName "kube-api-access-6c58n". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.188288 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="253a763c-21da-4224-91a2-e3bdc6eca0e9" containerName="galera" containerID="cri-o://48b97b774a06d9dc312d0707fa7310a967a301a1c35ea672aebd0b0cb4aa8329" gracePeriod=30 Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.250985 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6c58n\" (UniqueName: \"kubernetes.io/projected/b959a608-80f8-43f4-81a4-203b9a27467d-kube-api-access-6c58n\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.251011 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b959a608-80f8-43f4-81a4-203b9a27467d-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.258739 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-543a-account-create-update-mxr7p" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.289700 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.301209 4751 scope.go:117] "RemoveContainer" containerID="6d76eae4f0a48089e17a193410c4eec54a030b572ca6ae036b07233bd4f4615a" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.311640 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-c1fb-account-create-update-zkj9t" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.312657 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-748c66fdb6-xsx5t" podUID="f9ca6eb2-820e-49ea-80ca-bd0e352d4243" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.168:9311/healthcheck\": dial tcp 10.217.0.168:9311: connect: connection refused" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.312796 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-748c66fdb6-xsx5t" podUID="f9ca6eb2-820e-49ea-80ca-bd0e352d4243" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.168:9311/healthcheck\": dial tcp 10.217.0.168:9311: connect: connection refused" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.352421 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c4060a4-e264-4a4a-90ea-4a270cc50940-operator-scripts\") pod \"6c4060a4-e264-4a4a-90ea-4a270cc50940\" (UID: \"6c4060a4-e264-4a4a-90ea-4a270cc50940\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.352458 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gmcxg\" (UniqueName: \"kubernetes.io/projected/6c4060a4-e264-4a4a-90ea-4a270cc50940-kube-api-access-gmcxg\") pod \"6c4060a4-e264-4a4a-90ea-4a270cc50940\" (UID: \"6c4060a4-e264-4a4a-90ea-4a270cc50940\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.353073 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c4060a4-e264-4a4a-90ea-4a270cc50940-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6c4060a4-e264-4a4a-90ea-4a270cc50940" (UID: "6c4060a4-e264-4a4a-90ea-4a270cc50940"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.357180 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c4060a4-e264-4a4a-90ea-4a270cc50940-kube-api-access-gmcxg" (OuterVolumeSpecName: "kube-api-access-gmcxg") pod "6c4060a4-e264-4a4a-90ea-4a270cc50940" (UID: "6c4060a4-e264-4a4a-90ea-4a270cc50940"). InnerVolumeSpecName "kube-api-access-gmcxg". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.366047 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-fb4c-account-create-update-7hrjf" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.378168 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.433166 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.441004 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.458004 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-scripts\") pod \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.458057 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-public-tls-certs\") pod \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.458090 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v66zf\" (UniqueName: \"kubernetes.io/projected/465988d0-be74-4295-bb31-4265148803e8-kube-api-access-v66zf\") pod \"465988d0-be74-4295-bb31-4265148803e8\" (UID: \"465988d0-be74-4295-bb31-4265148803e8\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.458116 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-combined-ca-bundle\") pod \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.458148 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d7fb2588-cb2f-4495-ab6d-4f6aef939caf-operator-scripts\") pod \"d7fb2588-cb2f-4495-ab6d-4f6aef939caf\" (UID: \"d7fb2588-cb2f-4495-ab6d-4f6aef939caf\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.458200 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-logs\") pod \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.458216 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s757q\" (UniqueName: \"kubernetes.io/projected/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-kube-api-access-s757q\") pod \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.458242 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-config-data\") pod \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.458322 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzc98\" (UniqueName: \"kubernetes.io/projected/d7fb2588-cb2f-4495-ab6d-4f6aef939caf-kube-api-access-nzc98\") pod \"d7fb2588-cb2f-4495-ab6d-4f6aef939caf\" (UID: \"d7fb2588-cb2f-4495-ab6d-4f6aef939caf\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.458981 4751 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-internal-tls-certs\") pod \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\" (UID: \"3c5e58eb-31a4-4253-8cb9-a9486bb2d955\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.459010 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/465988d0-be74-4295-bb31-4265148803e8-operator-scripts\") pod \"465988d0-be74-4295-bb31-4265148803e8\" (UID: \"465988d0-be74-4295-bb31-4265148803e8\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.459430 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gmcxg\" (UniqueName: \"kubernetes.io/projected/6c4060a4-e264-4a4a-90ea-4a270cc50940-kube-api-access-gmcxg\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.459445 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c4060a4-e264-4a4a-90ea-4a270cc50940-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.460302 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/465988d0-be74-4295-bb31-4265148803e8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "465988d0-be74-4295-bb31-4265148803e8" (UID: "465988d0-be74-4295-bb31-4265148803e8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.463709 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-scripts" (OuterVolumeSpecName: "scripts") pod "3c5e58eb-31a4-4253-8cb9-a9486bb2d955" (UID: "3c5e58eb-31a4-4253-8cb9-a9486bb2d955"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.463935 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-kube-api-access-s757q" (OuterVolumeSpecName: "kube-api-access-s757q") pod "3c5e58eb-31a4-4253-8cb9-a9486bb2d955" (UID: "3c5e58eb-31a4-4253-8cb9-a9486bb2d955"). InnerVolumeSpecName "kube-api-access-s757q". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.464252 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7fb2588-cb2f-4495-ab6d-4f6aef939caf-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d7fb2588-cb2f-4495-ab6d-4f6aef939caf" (UID: "d7fb2588-cb2f-4495-ab6d-4f6aef939caf"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.469205 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/465988d0-be74-4295-bb31-4265148803e8-kube-api-access-v66zf" (OuterVolumeSpecName: "kube-api-access-v66zf") pod "465988d0-be74-4295-bb31-4265148803e8" (UID: "465988d0-be74-4295-bb31-4265148803e8"). InnerVolumeSpecName "kube-api-access-v66zf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.471786 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-logs" (OuterVolumeSpecName: "logs") pod "3c5e58eb-31a4-4253-8cb9-a9486bb2d955" (UID: "3c5e58eb-31a4-4253-8cb9-a9486bb2d955"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.486599 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7fb2588-cb2f-4495-ab6d-4f6aef939caf-kube-api-access-nzc98" (OuterVolumeSpecName: "kube-api-access-nzc98") pod "d7fb2588-cb2f-4495-ab6d-4f6aef939caf" (UID: "d7fb2588-cb2f-4495-ab6d-4f6aef939caf"). InnerVolumeSpecName "kube-api-access-nzc98". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.535062 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0958cc92-4dcb-4e10-b592-b3800bfe7a18" path="/var/lib/kubelet/pods/0958cc92-4dcb-4e10-b592-b3800bfe7a18/volumes" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.535860 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3478d8a7-c396-4368-81eb-00d79c45c5b7" path="/var/lib/kubelet/pods/3478d8a7-c396-4368-81eb-00d79c45c5b7/volumes" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.537051 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47bf8499-97a6-4f76-8e2e-25b3fbff1d93" path="/var/lib/kubelet/pods/47bf8499-97a6-4f76-8e2e-25b3fbff1d93/volumes" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.538635 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d395a15-ded3-4216-a09e-85b0305c2225" path="/var/lib/kubelet/pods/4d395a15-ded3-4216-a09e-85b0305c2225/volumes" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.539973 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f256e1b-bd56-4dd3-a150-7660ab6d222f" path="/var/lib/kubelet/pods/6f256e1b-bd56-4dd3-a150-7660ab6d222f/volumes" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.540685 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="907bed97-620e-441c-9539-b8e62c988b52" path="/var/lib/kubelet/pods/907bed97-620e-441c-9539-b8e62c988b52/volumes" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.541631 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9f1619e-893b-4f17-b105-214ccbf6385e" path="/var/lib/kubelet/pods/a9f1619e-893b-4f17-b105-214ccbf6385e/volumes" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.542478 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3c5e58eb-31a4-4253-8cb9-a9486bb2d955" (UID: "3c5e58eb-31a4-4253-8cb9-a9486bb2d955"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.552079 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-config-data" (OuterVolumeSpecName: "config-data") pod "3c5e58eb-31a4-4253-8cb9-a9486bb2d955" (UID: "3c5e58eb-31a4-4253-8cb9-a9486bb2d955"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.560660 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-config-data\") pod \"2ad24b50-556b-4799-a598-b7618c1664fd\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.560709 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ad24b50-556b-4799-a598-b7618c1664fd-logs\") pod \"2ad24b50-556b-4799-a598-b7618c1664fd\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.560754 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ef465c53-5add-41ff-9fcc-00e714bc2bc0-log-httpd\") pod \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.560816 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6xwg\" (UniqueName: \"kubernetes.io/projected/ef465c53-5add-41ff-9fcc-00e714bc2bc0-kube-api-access-w6xwg\") pod \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.560883 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ef465c53-5add-41ff-9fcc-00e714bc2bc0-etc-swift\") pod \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.560919 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-scripts\") pod \"2ad24b50-556b-4799-a598-b7618c1664fd\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.560943 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ghl79\" (UniqueName: \"kubernetes.io/projected/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-kube-api-access-ghl79\") pod \"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa\" (UID: \"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.560980 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-config-data\") pod \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.561003 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-combined-ca-bundle\") pod \"2ad24b50-556b-4799-a598-b7618c1664fd\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.561028 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ef465c53-5add-41ff-9fcc-00e714bc2bc0-run-httpd\") pod \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " Feb 27 
16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.561058 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-internal-tls-certs\") pod \"2ad24b50-556b-4799-a598-b7618c1664fd\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.561102 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-public-tls-certs\") pod \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.561120 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttdxf\" (UniqueName: \"kubernetes.io/projected/2ad24b50-556b-4799-a598-b7618c1664fd-kube-api-access-ttdxf\") pod \"2ad24b50-556b-4799-a598-b7618c1664fd\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.561153 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-combined-ca-bundle\") pod \"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa\" (UID: \"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.561173 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-kube-state-metrics-tls-certs\") pod \"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa\" (UID: \"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.561189 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-internal-tls-certs\") pod \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.561214 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-kube-state-metrics-tls-config\") pod \"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa\" (UID: \"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.561238 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"2ad24b50-556b-4799-a598-b7618c1664fd\" (UID: \"2ad24b50-556b-4799-a598-b7618c1664fd\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.561259 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-combined-ca-bundle\") pod \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\" (UID: \"ef465c53-5add-41ff-9fcc-00e714bc2bc0\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.561277 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2ad24b50-556b-4799-a598-b7618c1664fd-httpd-run\") pod \"2ad24b50-556b-4799-a598-b7618c1664fd\" (UID: 
\"2ad24b50-556b-4799-a598-b7618c1664fd\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.561534 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ddea1ab1-d19b-42f5-833a-6578608d54cc-operator-scripts\") pod \"keystone-0a1f-account-create-update-f6ks2\" (UID: \"ddea1ab1-d19b-42f5-833a-6578608d54cc\") " pod="openstack/keystone-0a1f-account-create-update-f6ks2" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.561585 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dg54\" (UniqueName: \"kubernetes.io/projected/ddea1ab1-d19b-42f5-833a-6578608d54cc-kube-api-access-7dg54\") pod \"keystone-0a1f-account-create-update-f6ks2\" (UID: \"ddea1ab1-d19b-42f5-833a-6578608d54cc\") " pod="openstack/keystone-0a1f-account-create-update-f6ks2" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.561700 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.561717 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v66zf\" (UniqueName: \"kubernetes.io/projected/465988d0-be74-4295-bb31-4265148803e8-kube-api-access-v66zf\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.561727 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.561736 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d7fb2588-cb2f-4495-ab6d-4f6aef939caf-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.561745 4751 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-logs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.561753 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s757q\" (UniqueName: \"kubernetes.io/projected/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-kube-api-access-s757q\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.561760 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.561768 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzc98\" (UniqueName: \"kubernetes.io/projected/d7fb2588-cb2f-4495-ab6d-4f6aef939caf-kube-api-access-nzc98\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.561777 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/465988d0-be74-4295-bb31-4265148803e8-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.564005 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ef465c53-5add-41ff-9fcc-00e714bc2bc0-log-httpd" (OuterVolumeSpecName: 
"log-httpd") pod "ef465c53-5add-41ff-9fcc-00e714bc2bc0" (UID: "ef465c53-5add-41ff-9fcc-00e714bc2bc0"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.564236 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ad24b50-556b-4799-a598-b7618c1664fd-logs" (OuterVolumeSpecName: "logs") pod "2ad24b50-556b-4799-a598-b7618c1664fd" (UID: "2ad24b50-556b-4799-a598-b7618c1664fd"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.565531 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-scripts" (OuterVolumeSpecName: "scripts") pod "2ad24b50-556b-4799-a598-b7618c1664fd" (UID: "2ad24b50-556b-4799-a598-b7618c1664fd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: E0227 16:50:48.568490 4751 projected.go:194] Error preparing data for projected volume kube-api-access-7dg54 for pod openstack/keystone-0a1f-account-create-update-f6ks2: failed to fetch token: serviceaccounts "galera-openstack" not found Feb 27 16:50:48 crc kubenswrapper[4751]: E0227 16:50:48.568561 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ddea1ab1-d19b-42f5-833a-6578608d54cc-kube-api-access-7dg54 podName:ddea1ab1-d19b-42f5-833a-6578608d54cc nodeName:}" failed. No retries permitted until 2026-02-27 16:50:49.568537127 +0000 UTC m=+1611.715551644 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-7dg54" (UniqueName: "kubernetes.io/projected/ddea1ab1-d19b-42f5-833a-6578608d54cc-kube-api-access-7dg54") pod "keystone-0a1f-account-create-update-f6ks2" (UID: "ddea1ab1-d19b-42f5-833a-6578608d54cc") : failed to fetch token: serviceaccounts "galera-openstack" not found Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.569072 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ef465c53-5add-41ff-9fcc-00e714bc2bc0-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ef465c53-5add-41ff-9fcc-00e714bc2bc0" (UID: "ef465c53-5add-41ff-9fcc-00e714bc2bc0"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.582570 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ad24b50-556b-4799-a598-b7618c1664fd-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "2ad24b50-556b-4799-a598-b7618c1664fd" (UID: "2ad24b50-556b-4799-a598-b7618c1664fd"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.582661 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef465c53-5add-41ff-9fcc-00e714bc2bc0-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "ef465c53-5add-41ff-9fcc-00e714bc2bc0" (UID: "ef465c53-5add-41ff-9fcc-00e714bc2bc0"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: E0227 16:50:48.582691 4751 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Feb 27 16:50:48 crc kubenswrapper[4751]: E0227 16:50:48.582816 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ddea1ab1-d19b-42f5-833a-6578608d54cc-operator-scripts podName:ddea1ab1-d19b-42f5-833a-6578608d54cc nodeName:}" failed. No retries permitted until 2026-02-27 16:50:49.582781747 +0000 UTC m=+1611.729796194 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/ddea1ab1-d19b-42f5-833a-6578608d54cc-operator-scripts") pod "keystone-0a1f-account-create-update-f6ks2" (UID: "ddea1ab1-d19b-42f5-833a-6578608d54cc") : configmap "openstack-scripts" not found Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.608658 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ad24b50-556b-4799-a598-b7618c1664fd-kube-api-access-ttdxf" (OuterVolumeSpecName: "kube-api-access-ttdxf") pod "2ad24b50-556b-4799-a598-b7618c1664fd" (UID: "2ad24b50-556b-4799-a598-b7618c1664fd"). InnerVolumeSpecName "kube-api-access-ttdxf". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.610690 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-kube-api-access-ghl79" (OuterVolumeSpecName: "kube-api-access-ghl79") pod "54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa" (UID: "54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa"). InnerVolumeSpecName "kube-api-access-ghl79". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.611198 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "2ad24b50-556b-4799-a598-b7618c1664fd" (UID: "2ad24b50-556b-4799-a598-b7618c1664fd"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.611224 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-kube-state-metrics-tls-config" (OuterVolumeSpecName: "kube-state-metrics-tls-config") pod "54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa" (UID: "54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa"). InnerVolumeSpecName "kube-state-metrics-tls-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.611259 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa" (UID: "54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.611254 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef465c53-5add-41ff-9fcc-00e714bc2bc0-kube-api-access-w6xwg" (OuterVolumeSpecName: "kube-api-access-w6xwg") pod "ef465c53-5add-41ff-9fcc-00e714bc2bc0" (UID: "ef465c53-5add-41ff-9fcc-00e714bc2bc0"). 
InnerVolumeSpecName "kube-api-access-w6xwg". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.663607 4751 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ad24b50-556b-4799-a598-b7618c1664fd-logs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.663634 4751 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ef465c53-5add-41ff-9fcc-00e714bc2bc0-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.663645 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6xwg\" (UniqueName: \"kubernetes.io/projected/ef465c53-5add-41ff-9fcc-00e714bc2bc0-kube-api-access-w6xwg\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.663656 4751 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ef465c53-5add-41ff-9fcc-00e714bc2bc0-etc-swift\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.663664 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.663672 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ghl79\" (UniqueName: \"kubernetes.io/projected/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-kube-api-access-ghl79\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.663680 4751 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ef465c53-5add-41ff-9fcc-00e714bc2bc0-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.663688 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttdxf\" (UniqueName: \"kubernetes.io/projected/2ad24b50-556b-4799-a598-b7618c1664fd-kube-api-access-ttdxf\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.663697 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.663705 4751 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-kube-state-metrics-tls-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.663724 4751 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.663733 4751 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2ad24b50-556b-4799-a598-b7618c1664fd-httpd-run\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.670805 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-config-data" (OuterVolumeSpecName: "config-data") pod 
"2ad24b50-556b-4799-a598-b7618c1664fd" (UID: "2ad24b50-556b-4799-a598-b7618c1664fd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.697079 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2ad24b50-556b-4799-a598-b7618c1664fd" (UID: "2ad24b50-556b-4799-a598-b7618c1664fd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.699780 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "3c5e58eb-31a4-4253-8cb9-a9486bb2d955" (UID: "3c5e58eb-31a4-4253-8cb9-a9486bb2d955"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.708919 4751 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.709506 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "ef465c53-5add-41ff-9fcc-00e714bc2bc0" (UID: "ef465c53-5add-41ff-9fcc-00e714bc2bc0"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.710880 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-config-data" (OuterVolumeSpecName: "config-data") pod "ef465c53-5add-41ff-9fcc-00e714bc2bc0" (UID: "ef465c53-5add-41ff-9fcc-00e714bc2bc0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.725280 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ef465c53-5add-41ff-9fcc-00e714bc2bc0" (UID: "ef465c53-5add-41ff-9fcc-00e714bc2bc0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.729809 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ef465c53-5add-41ff-9fcc-00e714bc2bc0" (UID: "ef465c53-5add-41ff-9fcc-00e714bc2bc0"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.730105 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-kube-state-metrics-tls-certs" (OuterVolumeSpecName: "kube-state-metrics-tls-certs") pod "54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa" (UID: "54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa"). InnerVolumeSpecName "kube-state-metrics-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.733007 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "3c5e58eb-31a4-4253-8cb9-a9486bb2d955" (UID: "3c5e58eb-31a4-4253-8cb9-a9486bb2d955"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.754752 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "2ad24b50-556b-4799-a598-b7618c1664fd" (UID: "2ad24b50-556b-4799-a598-b7618c1664fd"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.766084 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.766160 4751 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.766226 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.766284 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.766337 4751 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ad24b50-556b-4799-a598-b7618c1664fd-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.766390 4751 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c5e58eb-31a4-4253-8cb9-a9486bb2d955-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.766456 4751 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.766505 4751 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa-kube-state-metrics-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.766553 4751 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.766599 4751 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.766655 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef465c53-5add-41ff-9fcc-00e714bc2bc0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.794487 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.795682 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-fb4c-account-create-update-7hrjf" event={"ID":"d7fb2588-cb2f-4495-ab6d-4f6aef939caf","Type":"ContainerDied","Data":"ea99d11420fa194c37d5dcbdc2a6ebb761c29bffd17512ae4a56a10d593ba94b"} Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.795785 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-fb4c-account-create-update-7hrjf" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.799683 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-543a-account-create-update-mxr7p" event={"ID":"6c4060a4-e264-4a4a-90ea-4a270cc50940","Type":"ContainerDied","Data":"a58242e3dae08117756e1572de969d04e33194f3205cdf4599e5cf0a72b2f8de"} Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.799762 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-543a-account-create-update-mxr7p" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.803251 4751 generic.go:334] "Generic (PLEG): container finished" podID="b4785321-8f3e-44cb-833c-0b78bc368cd9" containerID="c4a7175c059cf3518ae6eba6d361fbebbc8c52020d2692d9f04fb59309e9cac4" exitCode=0 Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.803274 4751 generic.go:334] "Generic (PLEG): container finished" podID="b4785321-8f3e-44cb-833c-0b78bc368cd9" containerID="62ed64131f674628306788d24cfc85250f4581f979fadacc28ddf528b64bebfd" exitCode=2 Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.803281 4751 generic.go:334] "Generic (PLEG): container finished" podID="b4785321-8f3e-44cb-833c-0b78bc368cd9" containerID="5ea526479c46d824bbf94a208fd6d3670757ee20a011265cfd59b145eb86cf1e" exitCode=0 Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.803309 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4785321-8f3e-44cb-833c-0b78bc368cd9","Type":"ContainerDied","Data":"c4a7175c059cf3518ae6eba6d361fbebbc8c52020d2692d9f04fb59309e9cac4"} Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.803328 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4785321-8f3e-44cb-833c-0b78bc368cd9","Type":"ContainerDied","Data":"62ed64131f674628306788d24cfc85250f4581f979fadacc28ddf528b64bebfd"} Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.803337 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4785321-8f3e-44cb-833c-0b78bc368cd9","Type":"ContainerDied","Data":"5ea526479c46d824bbf94a208fd6d3670757ee20a011265cfd59b145eb86cf1e"} Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.805519 4751 generic.go:334] "Generic (PLEG): container finished" podID="27f559b3-2c7d-4567-b836-702db66d74ae" containerID="fa81b9ef2d02d79bb411ca1a7c2d1c560865c6e665f8780f4d1d418fefd52da9" exitCode=0 Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 
16:50:48.805648 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.805952 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"27f559b3-2c7d-4567-b836-702db66d74ae","Type":"ContainerDied","Data":"fa81b9ef2d02d79bb411ca1a7c2d1c560865c6e665f8780f4d1d418fefd52da9"} Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.805976 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"27f559b3-2c7d-4567-b836-702db66d74ae","Type":"ContainerDied","Data":"5497e8b0e8cdc95de37e0fce91a6708e2109da49f34acf9b3f21f6f147f5e74c"} Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.805992 4751 scope.go:117] "RemoveContainer" containerID="fa81b9ef2d02d79bb411ca1a7c2d1c560865c6e665f8780f4d1d418fefd52da9" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.808184 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_36495e7a-b8f8-4d54-a504-e92bb6211327/ovn-northd/0.log" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.808216 4751 generic.go:334] "Generic (PLEG): container finished" podID="36495e7a-b8f8-4d54-a504-e92bb6211327" containerID="f077319db94e719684cff2b1abac38bddd05de9e2a8257b1d62586df2368fb1d" exitCode=139 Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.808256 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"36495e7a-b8f8-4d54-a504-e92bb6211327","Type":"ContainerDied","Data":"f077319db94e719684cff2b1abac38bddd05de9e2a8257b1d62586df2368fb1d"} Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.809312 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c1fb-account-create-update-zkj9t" event={"ID":"465988d0-be74-4295-bb31-4265148803e8","Type":"ContainerDied","Data":"60d364d2a8d1d18c70f72c4a5dd1e1482e45b16d6647e104afcb27083233f8b2"} Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.809382 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-c1fb-account-create-update-zkj9t" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.814373 4751 generic.go:334] "Generic (PLEG): container finished" podID="3c3834ac-6796-485b-9dec-e45cebf976df" containerID="c70cb3e82423521f3790af75416c702817be4dee431d0d08dcd2396683feb66d" exitCode=0 Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.814526 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"3c3834ac-6796-485b-9dec-e45cebf976df","Type":"ContainerDied","Data":"c70cb3e82423521f3790af75416c702817be4dee431d0d08dcd2396683feb66d"} Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.817709 4751 generic.go:334] "Generic (PLEG): container finished" podID="ef932397-22e9-4d46-90e3-57076299d4cf" containerID="2845974abbc25e68928a72daeb08093bf2536ab0bc6998e59ff8fa1ec52eba91" exitCode=0 Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.817784 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ef932397-22e9-4d46-90e3-57076299d4cf","Type":"ContainerDied","Data":"2845974abbc25e68928a72daeb08093bf2536ab0bc6998e59ff8fa1ec52eba91"} Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.819274 4751 generic.go:334] "Generic (PLEG): container finished" podID="1a8f14c4-f8bc-4247-b2a2-72aa4801adfa" containerID="a91412d1338cfce1b6aed60bd52a679afa01513653b047734294840a9a916ff5" exitCode=0 Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.819317 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa","Type":"ContainerDied","Data":"a91412d1338cfce1b6aed60bd52a679afa01513653b047734294840a9a916ff5"} Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.821918 4751 generic.go:334] "Generic (PLEG): container finished" podID="f9ca6eb2-820e-49ea-80ca-bd0e352d4243" containerID="9fd5df7074b1fc7b9bf2a447c5d88215370bd4201e24afb0b45b856f50e14328" exitCode=0 Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.821973 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-748c66fdb6-xsx5t" event={"ID":"f9ca6eb2-820e-49ea-80ca-bd0e352d4243","Type":"ContainerDied","Data":"9fd5df7074b1fc7b9bf2a447c5d88215370bd4201e24afb0b45b856f50e14328"} Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.824775 4751 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/root-account-create-update-hvcjf" secret="" err="secret \"galera-openstack-dockercfg-ssvwq\" not found" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.824849 4751 scope.go:117] "RemoveContainer" containerID="47acd381e9eb85a84aea0efaae97b586799511423dd5af51c19474800fbeefeb" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.825212 4751 generic.go:334] "Generic (PLEG): container finished" podID="54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa" containerID="4e7234aa1cb6ed2ff691d6f7cb4d5496e2a8b1a43a07d04951122d27b3a31bab" exitCode=2 Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.825269 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa","Type":"ContainerDied","Data":"4e7234aa1cb6ed2ff691d6f7cb4d5496e2a8b1a43a07d04951122d27b3a31bab"} Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.825282 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.825287 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa","Type":"ContainerDied","Data":"385584b4a41f7c4ef34d9bd57960ad21c65a50948c309a4e0d4fb7cf3f3812c6"} Feb 27 16:50:48 crc kubenswrapper[4751]: E0227 16:50:48.825221 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CrashLoopBackOff: \"back-off 10s restarting failed container=mariadb-account-create-update pod=root-account-create-update-hvcjf_openstack(7dc9beed-8444-4389-8859-234af0090157)\"" pod="openstack/root-account-create-update-hvcjf" podUID="7dc9beed-8444-4389-8859-234af0090157" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.828314 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.828466 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5c5d5b6fdd-9d8xv" event={"ID":"3c5e58eb-31a4-4253-8cb9-a9486bb2d955","Type":"ContainerDied","Data":"a7dc5bbdf1330339eaa6d5cb30861bb716c4c5dcc8398476977a1a140453e867"} Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.828532 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5c5d5b6fdd-9d8xv" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.831393 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d758-account-create-update-jpvqv" event={"ID":"b959a608-80f8-43f4-81a4-203b9a27467d","Type":"ContainerDied","Data":"ddea37d668ad6ba768204e6980bab21fe72d6ccedfb0ee35e65e653881ad6a26"} Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.831549 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-d758-account-create-update-jpvqv" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.833709 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-ccb964dc9-cj74q" event={"ID":"ef465c53-5add-41ff-9fcc-00e714bc2bc0","Type":"ContainerDied","Data":"6172eb80202c750be3ee41ecfc973c9f136b8ecf00b92047157e23d3dc01529f"} Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.833784 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-ccb964dc9-cj74q" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.845283 4751 scope.go:117] "RemoveContainer" containerID="1605ce034c9a31b1ba7385475f66a6cf4c1eed04b5e224929bfc6e00a7735ec1" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.845715 4751 generic.go:334] "Generic (PLEG): container finished" podID="68bcf8b3-a271-47f0-9815-17cd3fdaec3e" containerID="95dbd9481ab5bf19368b4e9a5596159862e13485f791a125ad43cdef90030854" exitCode=0 Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.845765 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"68bcf8b3-a271-47f0-9815-17cd3fdaec3e","Type":"ContainerDied","Data":"95dbd9481ab5bf19368b4e9a5596159862e13485f791a125ad43cdef90030854"} Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.845788 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"68bcf8b3-a271-47f0-9815-17cd3fdaec3e","Type":"ContainerDied","Data":"067057f4d2b8204c4f197162308579313fb901d7226b09612ed331c69316ab6f"} Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.846169 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.855322 4751 generic.go:334] "Generic (PLEG): container finished" podID="2ad24b50-556b-4799-a598-b7618c1664fd" containerID="784dd66d4d1ab2b759ae49b26768db9fa74a32ff3519e2e85b622211356464e8" exitCode=0 Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.855429 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-0a1f-account-create-update-f6ks2" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.855513 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2ad24b50-556b-4799-a598-b7618c1664fd","Type":"ContainerDied","Data":"784dd66d4d1ab2b759ae49b26768db9fa74a32ff3519e2e85b622211356464e8"} Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.855548 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2ad24b50-556b-4799-a598-b7618c1664fd","Type":"ContainerDied","Data":"0295219047b4cea9155062b971b187d656f90229fd18c0bfc6f8a4a588b308d1"} Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.855567 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.971604 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-config-data-custom\") pod \"27f559b3-2c7d-4567-b836-702db66d74ae\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.971668 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-combined-ca-bundle\") pod \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\" (UID: \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.971781 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-combined-ca-bundle\") pod \"27f559b3-2c7d-4567-b836-702db66d74ae\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.971857 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-nova-metadata-tls-certs\") pod \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\" (UID: \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.971952 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-config-data\") pod \"27f559b3-2c7d-4567-b836-702db66d74ae\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.971992 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-public-tls-certs\") pod \"27f559b3-2c7d-4567-b836-702db66d74ae\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.972025 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/27f559b3-2c7d-4567-b836-702db66d74ae-etc-machine-id\") pod \"27f559b3-2c7d-4567-b836-702db66d74ae\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.972049 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-42kg7\" (UniqueName: \"kubernetes.io/projected/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-kube-api-access-42kg7\") pod \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\" (UID: \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.972135 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-internal-tls-certs\") pod \"27f559b3-2c7d-4567-b836-702db66d74ae\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.972486 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kphrv\" (UniqueName: 
\"kubernetes.io/projected/27f559b3-2c7d-4567-b836-702db66d74ae-kube-api-access-kphrv\") pod \"27f559b3-2c7d-4567-b836-702db66d74ae\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.972528 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/27f559b3-2c7d-4567-b836-702db66d74ae-logs\") pod \"27f559b3-2c7d-4567-b836-702db66d74ae\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.972627 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-logs\") pod \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\" (UID: \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.972671 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-config-data\") pod \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\" (UID: \"68bcf8b3-a271-47f0-9815-17cd3fdaec3e\") " Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.972703 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-scripts\") pod \"27f559b3-2c7d-4567-b836-702db66d74ae\" (UID: \"27f559b3-2c7d-4567-b836-702db66d74ae\") " Feb 27 16:50:48 crc kubenswrapper[4751]: E0227 16:50:48.976254 4751 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Feb 27 16:50:48 crc kubenswrapper[4751]: E0227 16:50:48.976329 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7dc9beed-8444-4389-8859-234af0090157-operator-scripts podName:7dc9beed-8444-4389-8859-234af0090157 nodeName:}" failed. No retries permitted until 2026-02-27 16:50:49.476308672 +0000 UTC m=+1611.623323119 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/7dc9beed-8444-4389-8859-234af0090157-operator-scripts") pod "root-account-create-update-hvcjf" (UID: "7dc9beed-8444-4389-8859-234af0090157") : configmap "openstack-scripts" not found Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.976722 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/27f559b3-2c7d-4567-b836-702db66d74ae-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "27f559b3-2c7d-4567-b836-702db66d74ae" (UID: "27f559b3-2c7d-4567-b836-702db66d74ae"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.977359 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27f559b3-2c7d-4567-b836-702db66d74ae-logs" (OuterVolumeSpecName: "logs") pod "27f559b3-2c7d-4567-b836-702db66d74ae" (UID: "27f559b3-2c7d-4567-b836-702db66d74ae"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.977833 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-logs" (OuterVolumeSpecName: "logs") pod "68bcf8b3-a271-47f0-9815-17cd3fdaec3e" (UID: "68bcf8b3-a271-47f0-9815-17cd3fdaec3e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.978988 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "27f559b3-2c7d-4567-b836-702db66d74ae" (UID: "27f559b3-2c7d-4567-b836-702db66d74ae"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.981916 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-scripts" (OuterVolumeSpecName: "scripts") pod "27f559b3-2c7d-4567-b836-702db66d74ae" (UID: "27f559b3-2c7d-4567-b836-702db66d74ae"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.989971 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27f559b3-2c7d-4567-b836-702db66d74ae-kube-api-access-kphrv" (OuterVolumeSpecName: "kube-api-access-kphrv") pod "27f559b3-2c7d-4567-b836-702db66d74ae" (UID: "27f559b3-2c7d-4567-b836-702db66d74ae"). InnerVolumeSpecName "kube-api-access-kphrv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:48 crc kubenswrapper[4751]: I0227 16:50:48.998761 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-kube-api-access-42kg7" (OuterVolumeSpecName: "kube-api-access-42kg7") pod "68bcf8b3-a271-47f0-9815-17cd3fdaec3e" (UID: "68bcf8b3-a271-47f0-9815-17cd3fdaec3e"). InnerVolumeSpecName "kube-api-access-42kg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.055594 4751 scope.go:117] "RemoveContainer" containerID="fa81b9ef2d02d79bb411ca1a7c2d1c560865c6e665f8780f4d1d418fefd52da9" Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.057208 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa81b9ef2d02d79bb411ca1a7c2d1c560865c6e665f8780f4d1d418fefd52da9\": container with ID starting with fa81b9ef2d02d79bb411ca1a7c2d1c560865c6e665f8780f4d1d418fefd52da9 not found: ID does not exist" containerID="fa81b9ef2d02d79bb411ca1a7c2d1c560865c6e665f8780f4d1d418fefd52da9" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.057279 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-0a1f-account-create-update-f6ks2" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.057269 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa81b9ef2d02d79bb411ca1a7c2d1c560865c6e665f8780f4d1d418fefd52da9"} err="failed to get container status \"fa81b9ef2d02d79bb411ca1a7c2d1c560865c6e665f8780f4d1d418fefd52da9\": rpc error: code = NotFound desc = could not find container \"fa81b9ef2d02d79bb411ca1a7c2d1c560865c6e665f8780f4d1d418fefd52da9\": container with ID starting with fa81b9ef2d02d79bb411ca1a7c2d1c560865c6e665f8780f4d1d418fefd52da9 not found: ID does not exist" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.057363 4751 scope.go:117] "RemoveContainer" containerID="1605ce034c9a31b1ba7385475f66a6cf4c1eed04b5e224929bfc6e00a7735ec1" Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.057739 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1605ce034c9a31b1ba7385475f66a6cf4c1eed04b5e224929bfc6e00a7735ec1\": container with ID starting with 1605ce034c9a31b1ba7385475f66a6cf4c1eed04b5e224929bfc6e00a7735ec1 not found: ID does not exist" containerID="1605ce034c9a31b1ba7385475f66a6cf4c1eed04b5e224929bfc6e00a7735ec1" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.057767 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1605ce034c9a31b1ba7385475f66a6cf4c1eed04b5e224929bfc6e00a7735ec1"} err="failed to get container status \"1605ce034c9a31b1ba7385475f66a6cf4c1eed04b5e224929bfc6e00a7735ec1\": rpc error: code = NotFound desc = could not find container \"1605ce034c9a31b1ba7385475f66a6cf4c1eed04b5e224929bfc6e00a7735ec1\": container with ID starting with 1605ce034c9a31b1ba7385475f66a6cf4c1eed04b5e224929bfc6e00a7735ec1 not found: ID does not exist" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.057791 4751 scope.go:117] "RemoveContainer" containerID="4e7234aa1cb6ed2ff691d6f7cb4d5496e2a8b1a43a07d04951122d27b3a31bab" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.068897 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-config-data" (OuterVolumeSpecName: "config-data") pod "68bcf8b3-a271-47f0-9815-17cd3fdaec3e" (UID: "68bcf8b3-a271-47f0-9815-17cd3fdaec3e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.075925 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-config-data" (OuterVolumeSpecName: "config-data") pod "27f559b3-2c7d-4567-b836-702db66d74ae" (UID: "27f559b3-2c7d-4567-b836-702db66d74ae"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.076573 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.077463 4751 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-logs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.077489 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.077502 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.077514 4751 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.077525 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.077545 4751 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/27f559b3-2c7d-4567-b836-702db66d74ae-etc-machine-id\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.077556 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-42kg7\" (UniqueName: \"kubernetes.io/projected/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-kube-api-access-42kg7\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.077568 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kphrv\" (UniqueName: \"kubernetes.io/projected/27f559b3-2c7d-4567-b836-702db66d74ae-kube-api-access-kphrv\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.077577 4751 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/27f559b3-2c7d-4567-b836-702db66d74ae-logs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.084745 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "27f559b3-2c7d-4567-b836-702db66d74ae" (UID: "27f559b3-2c7d-4567-b836-702db66d74ae"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.087032 4751 scope.go:117] "RemoveContainer" containerID="4e7234aa1cb6ed2ff691d6f7cb4d5496e2a8b1a43a07d04951122d27b3a31bab" Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.087574 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e7234aa1cb6ed2ff691d6f7cb4d5496e2a8b1a43a07d04951122d27b3a31bab\": container with ID starting with 4e7234aa1cb6ed2ff691d6f7cb4d5496e2a8b1a43a07d04951122d27b3a31bab not found: ID does not exist" containerID="4e7234aa1cb6ed2ff691d6f7cb4d5496e2a8b1a43a07d04951122d27b3a31bab" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.087606 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e7234aa1cb6ed2ff691d6f7cb4d5496e2a8b1a43a07d04951122d27b3a31bab"} err="failed to get container status \"4e7234aa1cb6ed2ff691d6f7cb4d5496e2a8b1a43a07d04951122d27b3a31bab\": rpc error: code = NotFound desc = could not find container \"4e7234aa1cb6ed2ff691d6f7cb4d5496e2a8b1a43a07d04951122d27b3a31bab\": container with ID starting with 4e7234aa1cb6ed2ff691d6f7cb4d5496e2a8b1a43a07d04951122d27b3a31bab not found: ID does not exist" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.087624 4751 scope.go:117] "RemoveContainer" containerID="d68a8fb32c3c122cd258ca89b0d0d8f27592db1bade310c58767879538bba0eb" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.095996 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_36495e7a-b8f8-4d54-a504-e92bb6211327/ovn-northd/0.log" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.096107 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.116701 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "68bcf8b3-a271-47f0-9815-17cd3fdaec3e" (UID: "68bcf8b3-a271-47f0-9815-17cd3fdaec3e"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.119726 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "27f559b3-2c7d-4567-b836-702db66d74ae" (UID: "27f559b3-2c7d-4567-b836-702db66d74ae"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.137909 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "68bcf8b3-a271-47f0-9815-17cd3fdaec3e" (UID: "68bcf8b3-a271-47f0-9815-17cd3fdaec3e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.140049 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.144412 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-ccb964dc9-cj74q"] Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.152867 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.157562 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "27f559b3-2c7d-4567-b836-702db66d74ae" (UID: "27f559b3-2c7d-4567-b836-702db66d74ae"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.165816 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-ccb964dc9-cj74q"] Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.168807 4751 scope.go:117] "RemoveContainer" containerID="fe0b48cb4c4111dfc56e9cc80355b87b652df2aa7701be61b0f630ca7e55427a" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.170706 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.178985 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-config-data\") pod \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.179151 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-combined-ca-bundle\") pod \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.179237 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58748\" (UniqueName: \"kubernetes.io/projected/36495e7a-b8f8-4d54-a504-e92bb6211327-kube-api-access-58748\") pod \"36495e7a-b8f8-4d54-a504-e92bb6211327\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.179323 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/36495e7a-b8f8-4d54-a504-e92bb6211327-scripts\") pod \"36495e7a-b8f8-4d54-a504-e92bb6211327\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.179368 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-public-tls-certs\") pod \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.179453 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-config-data-custom\") pod \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.183210 
4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xctv6\" (UniqueName: \"kubernetes.io/projected/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-kube-api-access-xctv6\") pod \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.183310 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/36495e7a-b8f8-4d54-a504-e92bb6211327-ovn-northd-tls-certs\") pod \"36495e7a-b8f8-4d54-a504-e92bb6211327\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.183363 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-logs\") pod \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.183433 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/36495e7a-b8f8-4d54-a504-e92bb6211327-ovn-rundir\") pod \"36495e7a-b8f8-4d54-a504-e92bb6211327\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.183545 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36495e7a-b8f8-4d54-a504-e92bb6211327-combined-ca-bundle\") pod \"36495e7a-b8f8-4d54-a504-e92bb6211327\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.183618 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36495e7a-b8f8-4d54-a504-e92bb6211327-config\") pod \"36495e7a-b8f8-4d54-a504-e92bb6211327\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.183642 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-internal-tls-certs\") pod \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\" (UID: \"f9ca6eb2-820e-49ea-80ca-bd0e352d4243\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.183752 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/36495e7a-b8f8-4d54-a504-e92bb6211327-metrics-certs-tls-certs\") pod \"36495e7a-b8f8-4d54-a504-e92bb6211327\" (UID: \"36495e7a-b8f8-4d54-a504-e92bb6211327\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.184512 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.184537 4751 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.184551 4751 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.184562 4751 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/27f559b3-2c7d-4567-b836-702db66d74ae-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.184574 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68bcf8b3-a271-47f0-9815-17cd3fdaec3e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.188341 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36495e7a-b8f8-4d54-a504-e92bb6211327-scripts" (OuterVolumeSpecName: "scripts") pod "36495e7a-b8f8-4d54-a504-e92bb6211327" (UID: "36495e7a-b8f8-4d54-a504-e92bb6211327"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.188686 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-logs" (OuterVolumeSpecName: "logs") pod "f9ca6eb2-820e-49ea-80ca-bd0e352d4243" (UID: "f9ca6eb2-820e-49ea-80ca-bd0e352d4243"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.189252 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36495e7a-b8f8-4d54-a504-e92bb6211327-config" (OuterVolumeSpecName: "config") pod "36495e7a-b8f8-4d54-a504-e92bb6211327" (UID: "36495e7a-b8f8-4d54-a504-e92bb6211327"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.201567 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36495e7a-b8f8-4d54-a504-e92bb6211327-kube-api-access-58748" (OuterVolumeSpecName: "kube-api-access-58748") pod "36495e7a-b8f8-4d54-a504-e92bb6211327" (UID: "36495e7a-b8f8-4d54-a504-e92bb6211327"). InnerVolumeSpecName "kube-api-access-58748". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.201577 4751 scope.go:117] "RemoveContainer" containerID="668b3715f8d8476a3e1d9d7443b5adb7e8ae4b4b6eac2c5be4a3dc6216b3c24f" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.202740 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.203161 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36495e7a-b8f8-4d54-a504-e92bb6211327-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "36495e7a-b8f8-4d54-a504-e92bb6211327" (UID: "36495e7a-b8f8-4d54-a504-e92bb6211327"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.211094 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-kube-api-access-xctv6" (OuterVolumeSpecName: "kube-api-access-xctv6") pod "f9ca6eb2-820e-49ea-80ca-bd0e352d4243" (UID: "f9ca6eb2-820e-49ea-80ca-bd0e352d4243"). InnerVolumeSpecName "kube-api-access-xctv6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.211565 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f9ca6eb2-820e-49ea-80ca-bd0e352d4243" (UID: "f9ca6eb2-820e-49ea-80ca-bd0e352d4243"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.222926 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.247829 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-d758-account-create-update-jpvqv"] Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.249062 4751 scope.go:117] "RemoveContainer" containerID="843802b514320212732f6a6e3503b615909bac4f2d8f4d4458b80f0b1046f521" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.252633 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-d758-account-create-update-jpvqv"] Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.267143 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36495e7a-b8f8-4d54-a504-e92bb6211327-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "36495e7a-b8f8-4d54-a504-e92bb6211327" (UID: "36495e7a-b8f8-4d54-a504-e92bb6211327"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.270283 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-5c5d5b6fdd-9d8xv"] Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.274716 4751 scope.go:117] "RemoveContainer" containerID="95dbd9481ab5bf19368b4e9a5596159862e13485f791a125ad43cdef90030854" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.283546 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f9ca6eb2-820e-49ea-80ca-bd0e352d4243" (UID: "f9ca6eb2-820e-49ea-80ca-bd0e352d4243"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.285431 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-scripts\") pod \"ef932397-22e9-4d46-90e3-57076299d4cf\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.285475 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f6zwb\" (UniqueName: \"kubernetes.io/projected/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-kube-api-access-f6zwb\") pod \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.285505 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-config-data\") pod \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.285527 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-combined-ca-bundle\") pod \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.285547 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3c3834ac-6796-485b-9dec-e45cebf976df-kolla-config\") pod \"3c3834ac-6796-485b-9dec-e45cebf976df\" (UID: \"3c3834ac-6796-485b-9dec-e45cebf976df\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.285639 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-combined-ca-bundle\") pod \"ef932397-22e9-4d46-90e3-57076299d4cf\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.285662 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-config-data\") pod \"ef932397-22e9-4d46-90e3-57076299d4cf\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.285693 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8hhcz\" (UniqueName: \"kubernetes.io/projected/ef932397-22e9-4d46-90e3-57076299d4cf-kube-api-access-8hhcz\") pod \"ef932397-22e9-4d46-90e3-57076299d4cf\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.285714 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-logs\") pod \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.285737 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xh4td\" (UniqueName: \"kubernetes.io/projected/3c3834ac-6796-485b-9dec-e45cebf976df-kube-api-access-xh4td\") pod \"3c3834ac-6796-485b-9dec-e45cebf976df\" (UID: 
\"3c3834ac-6796-485b-9dec-e45cebf976df\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.285783 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-internal-tls-certs\") pod \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.285828 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-public-tls-certs\") pod \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\" (UID: \"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.285885 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c3834ac-6796-485b-9dec-e45cebf976df-combined-ca-bundle\") pod \"3c3834ac-6796-485b-9dec-e45cebf976df\" (UID: \"3c3834ac-6796-485b-9dec-e45cebf976df\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.285903 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ef932397-22e9-4d46-90e3-57076299d4cf-httpd-run\") pod \"ef932397-22e9-4d46-90e3-57076299d4cf\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.286011 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ef932397-22e9-4d46-90e3-57076299d4cf-logs\") pod \"ef932397-22e9-4d46-90e3-57076299d4cf\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.286050 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-public-tls-certs\") pod \"ef932397-22e9-4d46-90e3-57076299d4cf\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.286071 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c3834ac-6796-485b-9dec-e45cebf976df-memcached-tls-certs\") pod \"3c3834ac-6796-485b-9dec-e45cebf976df\" (UID: \"3c3834ac-6796-485b-9dec-e45cebf976df\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.286103 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3c3834ac-6796-485b-9dec-e45cebf976df-config-data\") pod \"3c3834ac-6796-485b-9dec-e45cebf976df\" (UID: \"3c3834ac-6796-485b-9dec-e45cebf976df\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.286126 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ef932397-22e9-4d46-90e3-57076299d4cf\" (UID: \"ef932397-22e9-4d46-90e3-57076299d4cf\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.286464 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36495e7a-b8f8-4d54-a504-e92bb6211327-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.286476 4751 reconciler_common.go:293] "Volume detached for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.286486 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-58748\" (UniqueName: \"kubernetes.io/projected/36495e7a-b8f8-4d54-a504-e92bb6211327-kube-api-access-58748\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.286494 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/36495e7a-b8f8-4d54-a504-e92bb6211327-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.286503 4751 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.286511 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xctv6\" (UniqueName: \"kubernetes.io/projected/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-kube-api-access-xctv6\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.286519 4751 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-logs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.286527 4751 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/36495e7a-b8f8-4d54-a504-e92bb6211327-ovn-rundir\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.286535 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36495e7a-b8f8-4d54-a504-e92bb6211327-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.298745 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ef932397-22e9-4d46-90e3-57076299d4cf-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "ef932397-22e9-4d46-90e3-57076299d4cf" (UID: "ef932397-22e9-4d46-90e3-57076299d4cf"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.299489 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c3834ac-6796-485b-9dec-e45cebf976df-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "3c3834ac-6796-485b-9dec-e45cebf976df" (UID: "3c3834ac-6796-485b-9dec-e45cebf976df"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.300929 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-logs" (OuterVolumeSpecName: "logs") pod "1a8f14c4-f8bc-4247-b2a2-72aa4801adfa" (UID: "1a8f14c4-f8bc-4247-b2a2-72aa4801adfa"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.305949 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ef932397-22e9-4d46-90e3-57076299d4cf-logs" (OuterVolumeSpecName: "logs") pod "ef932397-22e9-4d46-90e3-57076299d4cf" (UID: "ef932397-22e9-4d46-90e3-57076299d4cf"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.299064 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-5c5d5b6fdd-9d8xv"] Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.301676 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 93a39ee11e77c8d1f29d5bc7f4d914ab63eb4c519c60efab540473556ec26c65 is running failed: container process not found" containerID="93a39ee11e77c8d1f29d5bc7f4d914ab63eb4c519c60efab540473556ec26c65" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.306271 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c3834ac-6796-485b-9dec-e45cebf976df-config-data" (OuterVolumeSpecName: "config-data") pod "3c3834ac-6796-485b-9dec-e45cebf976df" (UID: "3c3834ac-6796-485b-9dec-e45cebf976df"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.306527 4751 scope.go:117] "RemoveContainer" containerID="efdfb1b8b12751d73f4a5fcda2b86dfb5e8bc636338103308612ebd200159f20" Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.309794 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 93a39ee11e77c8d1f29d5bc7f4d914ab63eb4c519c60efab540473556ec26c65 is running failed: container process not found" containerID="93a39ee11e77c8d1f29d5bc7f4d914ab63eb4c519c60efab540473556ec26c65" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.310391 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 93a39ee11e77c8d1f29d5bc7f4d914ab63eb4c519c60efab540473556ec26c65 is running failed: container process not found" containerID="93a39ee11e77c8d1f29d5bc7f4d914ab63eb4c519c60efab540473556ec26c65" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.310450 4751 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 93a39ee11e77c8d1f29d5bc7f4d914ab63eb4c519c60efab540473556ec26c65 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="7555b92d-c801-4da2-8d2e-78fa39c892d2" containerName="nova-scheduler-scheduler" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.312186 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "ef932397-22e9-4d46-90e3-57076299d4cf" (UID: "ef932397-22e9-4d46-90e3-57076299d4cf"). InnerVolumeSpecName "local-storage10-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.325379 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c3834ac-6796-485b-9dec-e45cebf976df-kube-api-access-xh4td" (OuterVolumeSpecName: "kube-api-access-xh4td") pod "3c3834ac-6796-485b-9dec-e45cebf976df" (UID: "3c3834ac-6796-485b-9dec-e45cebf976df"). InnerVolumeSpecName "kube-api-access-xh4td". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.336539 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-scripts" (OuterVolumeSpecName: "scripts") pod "ef932397-22e9-4d46-90e3-57076299d4cf" (UID: "ef932397-22e9-4d46-90e3-57076299d4cf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.336812 4751 scope.go:117] "RemoveContainer" containerID="95dbd9481ab5bf19368b4e9a5596159862e13485f791a125ad43cdef90030854" Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.337292 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"95dbd9481ab5bf19368b4e9a5596159862e13485f791a125ad43cdef90030854\": container with ID starting with 95dbd9481ab5bf19368b4e9a5596159862e13485f791a125ad43cdef90030854 not found: ID does not exist" containerID="95dbd9481ab5bf19368b4e9a5596159862e13485f791a125ad43cdef90030854" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.337323 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95dbd9481ab5bf19368b4e9a5596159862e13485f791a125ad43cdef90030854"} err="failed to get container status \"95dbd9481ab5bf19368b4e9a5596159862e13485f791a125ad43cdef90030854\": rpc error: code = NotFound desc = could not find container \"95dbd9481ab5bf19368b4e9a5596159862e13485f791a125ad43cdef90030854\": container with ID starting with 95dbd9481ab5bf19368b4e9a5596159862e13485f791a125ad43cdef90030854 not found: ID does not exist" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.337343 4751 scope.go:117] "RemoveContainer" containerID="efdfb1b8b12751d73f4a5fcda2b86dfb5e8bc636338103308612ebd200159f20" Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.337786 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"efdfb1b8b12751d73f4a5fcda2b86dfb5e8bc636338103308612ebd200159f20\": container with ID starting with efdfb1b8b12751d73f4a5fcda2b86dfb5e8bc636338103308612ebd200159f20 not found: ID does not exist" containerID="efdfb1b8b12751d73f4a5fcda2b86dfb5e8bc636338103308612ebd200159f20" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.338549 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"efdfb1b8b12751d73f4a5fcda2b86dfb5e8bc636338103308612ebd200159f20"} err="failed to get container status \"efdfb1b8b12751d73f4a5fcda2b86dfb5e8bc636338103308612ebd200159f20\": rpc error: code = NotFound desc = could not find container \"efdfb1b8b12751d73f4a5fcda2b86dfb5e8bc636338103308612ebd200159f20\": container with ID starting with efdfb1b8b12751d73f4a5fcda2b86dfb5e8bc636338103308612ebd200159f20 not found: ID does not exist" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.338700 4751 scope.go:117] "RemoveContainer" 
containerID="784dd66d4d1ab2b759ae49b26768db9fa74a32ff3519e2e85b622211356464e8" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.364685 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-kube-api-access-f6zwb" (OuterVolumeSpecName: "kube-api-access-f6zwb") pod "1a8f14c4-f8bc-4247-b2a2-72aa4801adfa" (UID: "1a8f14c4-f8bc-4247-b2a2-72aa4801adfa"). InnerVolumeSpecName "kube-api-access-f6zwb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.371187 4751 scope.go:117] "RemoveContainer" containerID="e818dd36bb380e38ca7e8c06d9f356dc7568cbb2837fa401aecb68a64720dc8e" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.375908 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef932397-22e9-4d46-90e3-57076299d4cf-kube-api-access-8hhcz" (OuterVolumeSpecName: "kube-api-access-8hhcz") pod "ef932397-22e9-4d46-90e3-57076299d4cf" (UID: "ef932397-22e9-4d46-90e3-57076299d4cf"). InnerVolumeSpecName "kube-api-access-8hhcz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.380592 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-fb4c-account-create-update-7hrjf"] Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.386852 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-fb4c-account-create-update-7hrjf"] Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.389685 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.389711 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f6zwb\" (UniqueName: \"kubernetes.io/projected/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-kube-api-access-f6zwb\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.389724 4751 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3c3834ac-6796-485b-9dec-e45cebf976df-kolla-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.389734 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8hhcz\" (UniqueName: \"kubernetes.io/projected/ef932397-22e9-4d46-90e3-57076299d4cf-kube-api-access-8hhcz\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.389741 4751 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-logs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.389749 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xh4td\" (UniqueName: \"kubernetes.io/projected/3c3834ac-6796-485b-9dec-e45cebf976df-kube-api-access-xh4td\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.389757 4751 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ef932397-22e9-4d46-90e3-57076299d4cf-httpd-run\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.389764 4751 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/ef932397-22e9-4d46-90e3-57076299d4cf-logs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.389772 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3c3834ac-6796-485b-9dec-e45cebf976df-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.389791 4751 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.406708 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.413645 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.414053 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.428340 4751 scope.go:117] "RemoveContainer" containerID="784dd66d4d1ab2b759ae49b26768db9fa74a32ff3519e2e85b622211356464e8" Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.429257 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"784dd66d4d1ab2b759ae49b26768db9fa74a32ff3519e2e85b622211356464e8\": container with ID starting with 784dd66d4d1ab2b759ae49b26768db9fa74a32ff3519e2e85b622211356464e8 not found: ID does not exist" containerID="784dd66d4d1ab2b759ae49b26768db9fa74a32ff3519e2e85b622211356464e8" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.429290 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"784dd66d4d1ab2b759ae49b26768db9fa74a32ff3519e2e85b622211356464e8"} err="failed to get container status \"784dd66d4d1ab2b759ae49b26768db9fa74a32ff3519e2e85b622211356464e8\": rpc error: code = NotFound desc = could not find container \"784dd66d4d1ab2b759ae49b26768db9fa74a32ff3519e2e85b622211356464e8\": container with ID starting with 784dd66d4d1ab2b759ae49b26768db9fa74a32ff3519e2e85b622211356464e8 not found: ID does not exist" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.429309 4751 scope.go:117] "RemoveContainer" containerID="e818dd36bb380e38ca7e8c06d9f356dc7568cbb2837fa401aecb68a64720dc8e" Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.430225 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e818dd36bb380e38ca7e8c06d9f356dc7568cbb2837fa401aecb68a64720dc8e\": container with ID starting with e818dd36bb380e38ca7e8c06d9f356dc7568cbb2837fa401aecb68a64720dc8e not found: ID does not exist" containerID="e818dd36bb380e38ca7e8c06d9f356dc7568cbb2837fa401aecb68a64720dc8e" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.430276 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e818dd36bb380e38ca7e8c06d9f356dc7568cbb2837fa401aecb68a64720dc8e"} err="failed to get container status \"e818dd36bb380e38ca7e8c06d9f356dc7568cbb2837fa401aecb68a64720dc8e\": rpc error: code = NotFound desc = could not find container \"e818dd36bb380e38ca7e8c06d9f356dc7568cbb2837fa401aecb68a64720dc8e\": container with ID starting with 
e818dd36bb380e38ca7e8c06d9f356dc7568cbb2837fa401aecb68a64720dc8e not found: ID does not exist" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.434789 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-c1fb-account-create-update-zkj9t"] Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.439709 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-c1fb-account-create-update-zkj9t"] Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.466726 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-543a-account-create-update-mxr7p"] Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.471359 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1a8f14c4-f8bc-4247-b2a2-72aa4801adfa" (UID: "1a8f14c4-f8bc-4247-b2a2-72aa4801adfa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.473167 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-543a-account-create-update-mxr7p"] Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.473545 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ef932397-22e9-4d46-90e3-57076299d4cf" (UID: "ef932397-22e9-4d46-90e3-57076299d4cf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.482573 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "f9ca6eb2-820e-49ea-80ca-bd0e352d4243" (UID: "f9ca6eb2-820e-49ea-80ca-bd0e352d4243"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.484555 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.485614 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "f9ca6eb2-820e-49ea-80ca-bd0e352d4243" (UID: "f9ca6eb2-820e-49ea-80ca-bd0e352d4243"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.490792 4751 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.490817 4751 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.490825 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.490834 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.490885 4751 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.490923 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7dc9beed-8444-4389-8859-234af0090157-operator-scripts podName:7dc9beed-8444-4389-8859-234af0090157 nodeName:}" failed. No retries permitted until 2026-02-27 16:50:50.49091022 +0000 UTC m=+1612.637924667 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/7dc9beed-8444-4389-8859-234af0090157-operator-scripts") pod "root-account-create-update-hvcjf" (UID: "7dc9beed-8444-4389-8859-234af0090157") : configmap "openstack-scripts" not found Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.491458 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.493648 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c3834ac-6796-485b-9dec-e45cebf976df-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3c3834ac-6796-485b-9dec-e45cebf976df" (UID: "3c3834ac-6796-485b-9dec-e45cebf976df"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.502506 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.510970 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.518660 4751 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.523023 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36495e7a-b8f8-4d54-a504-e92bb6211327-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "36495e7a-b8f8-4d54-a504-e92bb6211327" (UID: "36495e7a-b8f8-4d54-a504-e92bb6211327"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.523831 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c3834ac-6796-485b-9dec-e45cebf976df-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "3c3834ac-6796-485b-9dec-e45cebf976df" (UID: "3c3834ac-6796-485b-9dec-e45cebf976df"). InnerVolumeSpecName "memcached-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.526179 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ef932397-22e9-4d46-90e3-57076299d4cf" (UID: "ef932397-22e9-4d46-90e3-57076299d4cf"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.528850 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36495e7a-b8f8-4d54-a504-e92bb6211327-ovn-northd-tls-certs" (OuterVolumeSpecName: "ovn-northd-tls-certs") pod "36495e7a-b8f8-4d54-a504-e92bb6211327" (UID: "36495e7a-b8f8-4d54-a504-e92bb6211327"). InnerVolumeSpecName "ovn-northd-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.529888 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-config-data" (OuterVolumeSpecName: "config-data") pod "1a8f14c4-f8bc-4247-b2a2-72aa4801adfa" (UID: "1a8f14c4-f8bc-4247-b2a2-72aa4801adfa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.531190 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-config-data" (OuterVolumeSpecName: "config-data") pod "f9ca6eb2-820e-49ea-80ca-bd0e352d4243" (UID: "f9ca6eb2-820e-49ea-80ca-bd0e352d4243"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.539765 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "1a8f14c4-f8bc-4247-b2a2-72aa4801adfa" (UID: "1a8f14c4-f8bc-4247-b2a2-72aa4801adfa"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.548108 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-config-data" (OuterVolumeSpecName: "config-data") pod "ef932397-22e9-4d46-90e3-57076299d4cf" (UID: "ef932397-22e9-4d46-90e3-57076299d4cf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.549534 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "1a8f14c4-f8bc-4247-b2a2-72aa4801adfa" (UID: "1a8f14c4-f8bc-4247-b2a2-72aa4801adfa"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.593920 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7555b92d-c801-4da2-8d2e-78fa39c892d2-combined-ca-bundle\") pod \"7555b92d-c801-4da2-8d2e-78fa39c892d2\" (UID: \"7555b92d-c801-4da2-8d2e-78fa39c892d2\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.593974 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bjbgd\" (UniqueName: \"kubernetes.io/projected/7555b92d-c801-4da2-8d2e-78fa39c892d2-kube-api-access-bjbgd\") pod \"7555b92d-c801-4da2-8d2e-78fa39c892d2\" (UID: \"7555b92d-c801-4da2-8d2e-78fa39c892d2\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.594130 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7555b92d-c801-4da2-8d2e-78fa39c892d2-config-data\") pod \"7555b92d-c801-4da2-8d2e-78fa39c892d2\" (UID: \"7555b92d-c801-4da2-8d2e-78fa39c892d2\") " Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.594396 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ddea1ab1-d19b-42f5-833a-6578608d54cc-operator-scripts\") pod \"keystone-0a1f-account-create-update-f6ks2\" (UID: \"ddea1ab1-d19b-42f5-833a-6578608d54cc\") " pod="openstack/keystone-0a1f-account-create-update-f6ks2" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.594465 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dg54\" (UniqueName: \"kubernetes.io/projected/ddea1ab1-d19b-42f5-833a-6578608d54cc-kube-api-access-7dg54\") pod \"keystone-0a1f-account-create-update-f6ks2\" (UID: \"ddea1ab1-d19b-42f5-833a-6578608d54cc\") " pod="openstack/keystone-0a1f-account-create-update-f6ks2" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.594537 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.594553 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.594562 4751 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/36495e7a-b8f8-4d54-a504-e92bb6211327-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.594571 4751 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.594581 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9ca6eb2-820e-49ea-80ca-bd0e352d4243-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.594590 4751 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa-public-tls-certs\") on node 
\"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.594598 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c3834ac-6796-485b-9dec-e45cebf976df-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.594606 4751 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef932397-22e9-4d46-90e3-57076299d4cf-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.594614 4751 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c3834ac-6796-485b-9dec-e45cebf976df-memcached-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.594622 4751 reconciler_common.go:293] "Volume detached for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/36495e7a-b8f8-4d54-a504-e92bb6211327-ovn-northd-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.594629 4751 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.596501 4751 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.596554 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ddea1ab1-d19b-42f5-833a-6578608d54cc-operator-scripts podName:ddea1ab1-d19b-42f5-833a-6578608d54cc nodeName:}" failed. No retries permitted until 2026-02-27 16:50:51.596540222 +0000 UTC m=+1613.743554669 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/ddea1ab1-d19b-42f5-833a-6578608d54cc-operator-scripts") pod "keystone-0a1f-account-create-update-f6ks2" (UID: "ddea1ab1-d19b-42f5-833a-6578608d54cc") : configmap "openstack-scripts" not found Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.597495 4751 projected.go:194] Error preparing data for projected volume kube-api-access-7dg54 for pod openstack/keystone-0a1f-account-create-update-f6ks2: failed to fetch token: serviceaccounts "galera-openstack" not found Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.597660 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ddea1ab1-d19b-42f5-833a-6578608d54cc-kube-api-access-7dg54 podName:ddea1ab1-d19b-42f5-833a-6578608d54cc nodeName:}" failed. No retries permitted until 2026-02-27 16:50:51.59762388 +0000 UTC m=+1613.744638327 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-7dg54" (UniqueName: "kubernetes.io/projected/ddea1ab1-d19b-42f5-833a-6578608d54cc-kube-api-access-7dg54") pod "keystone-0a1f-account-create-update-f6ks2" (UID: "ddea1ab1-d19b-42f5-833a-6578608d54cc") : failed to fetch token: serviceaccounts "galera-openstack" not found Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.599099 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2447590027e3c3985b0ff486fd0b56c2badc4c6132000ab0f491f2b1f773ddc6" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.600234 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7555b92d-c801-4da2-8d2e-78fa39c892d2-kube-api-access-bjbgd" (OuterVolumeSpecName: "kube-api-access-bjbgd") pod "7555b92d-c801-4da2-8d2e-78fa39c892d2" (UID: "7555b92d-c801-4da2-8d2e-78fa39c892d2"). InnerVolumeSpecName "kube-api-access-bjbgd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.600785 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2447590027e3c3985b0ff486fd0b56c2badc4c6132000ab0f491f2b1f773ddc6" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.601849 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2447590027e3c3985b0ff486fd0b56c2badc4c6132000ab0f491f2b1f773ddc6" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.601892 4751 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="2d8d9ed0-8606-47cb-a164-7e6bbac390cd" containerName="nova-cell1-conductor-conductor" Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.610433 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7555b92d-c801-4da2-8d2e-78fa39c892d2-combined-ca-bundle podName:7555b92d-c801-4da2-8d2e-78fa39c892d2 nodeName:}" failed. No retries permitted until 2026-02-27 16:50:50.11037764 +0000 UTC m=+1612.257392087 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "combined-ca-bundle" (UniqueName: "kubernetes.io/secret/7555b92d-c801-4da2-8d2e-78fa39c892d2-combined-ca-bundle") pod "7555b92d-c801-4da2-8d2e-78fa39c892d2" (UID: "7555b92d-c801-4da2-8d2e-78fa39c892d2") : error deleting /var/lib/kubelet/pods/7555b92d-c801-4da2-8d2e-78fa39c892d2/volume-subpaths: remove /var/lib/kubelet/pods/7555b92d-c801-4da2-8d2e-78fa39c892d2/volume-subpaths: no such file or directory Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.612303 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7555b92d-c801-4da2-8d2e-78fa39c892d2-config-data" (OuterVolumeSpecName: "config-data") pod "7555b92d-c801-4da2-8d2e-78fa39c892d2" (UID: "7555b92d-c801-4da2-8d2e-78fa39c892d2"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.696654 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7555b92d-c801-4da2-8d2e-78fa39c892d2-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.697086 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bjbgd\" (UniqueName: \"kubernetes.io/projected/7555b92d-c801-4da2-8d2e-78fa39c892d2-kube-api-access-bjbgd\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.799502 4751 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Feb 27 16:50:49 crc kubenswrapper[4751]: E0227 16:50:49.799715 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-config-data podName:cecf602c-dec2-40c6-922c-bf84b707b1b9 nodeName:}" failed. No retries permitted until 2026-02-27 16:50:57.799687859 +0000 UTC m=+1619.946702326 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-config-data") pod "rabbitmq-cell1-server-0" (UID: "cecf602c-dec2-40c6-922c-bf84b707b1b9") : configmap "rabbitmq-cell1-config-data" not found Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.832529 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="b4785321-8f3e-44cb-833c-0b78bc368cd9" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.211:3000/\": dial tcp 10.217.0.211:3000: connect: connection refused" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.867590 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_36495e7a-b8f8-4d54-a504-e92bb6211327/ovn-northd/0.log" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.867673 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"36495e7a-b8f8-4d54-a504-e92bb6211327","Type":"ContainerDied","Data":"4f81e2702cb5555abb4b0fcf13911e5fc069527acbd24c2bf2ca02167e55e1c3"} Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.867715 4751 scope.go:117] "RemoveContainer" containerID="8f37b9a53b57fd59b8d193823dd9bac3b95253b3c09ec6d44395ab006d4399e8" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.867849 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.875591 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.875588 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"3c3834ac-6796-485b-9dec-e45cebf976df","Type":"ContainerDied","Data":"8d4fa0205354e077863339c8fee33d79b4aa4af594cd5627463fc9a54d9eef37"} Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.879609 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ef932397-22e9-4d46-90e3-57076299d4cf","Type":"ContainerDied","Data":"2cd146dc73af18c958533dc6b08a6c4910148ec1236dcae4148ea3c80ca24ea9"} Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.879720 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.892188 4751 generic.go:334] "Generic (PLEG): container finished" podID="7555b92d-c801-4da2-8d2e-78fa39c892d2" containerID="93a39ee11e77c8d1f29d5bc7f4d914ab63eb4c519c60efab540473556ec26c65" exitCode=0 Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.892277 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7555b92d-c801-4da2-8d2e-78fa39c892d2","Type":"ContainerDied","Data":"93a39ee11e77c8d1f29d5bc7f4d914ab63eb4c519c60efab540473556ec26c65"} Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.892302 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7555b92d-c801-4da2-8d2e-78fa39c892d2","Type":"ContainerDied","Data":"511808db6495adc566dd8f0439256b3b7a63c64b047ec4bb690f7e87c6840453"} Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.892363 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.901519 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1a8f14c4-f8bc-4247-b2a2-72aa4801adfa","Type":"ContainerDied","Data":"b2e6d637a45de991813b9f92b6686d3bb835f980ea0aed40e243770b76f696e4"} Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.901596 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.906491 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-748c66fdb6-xsx5t" event={"ID":"f9ca6eb2-820e-49ea-80ca-bd0e352d4243","Type":"ContainerDied","Data":"834578fa0ad9ff02015b899a85b151bed3517c1c81a849a270c3f6aefecfe61d"} Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.906681 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-748c66fdb6-xsx5t" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.923718 4751 scope.go:117] "RemoveContainer" containerID="f077319db94e719684cff2b1abac38bddd05de9e2a8257b1d62586df2368fb1d" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.958775 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-0a1f-account-create-update-f6ks2" Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.961928 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Feb 27 16:50:49 crc kubenswrapper[4751]: I0227 16:50:49.980091 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-northd-0"] Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.011089 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.018052 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/memcached-0"] Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.043045 4751 scope.go:117] "RemoveContainer" containerID="c70cb3e82423521f3790af75416c702817be4dee431d0d08dcd2396683feb66d" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.055295 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-748c66fdb6-xsx5t"] Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.055337 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-748c66fdb6-xsx5t"] Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.059647 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.068144 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.079745 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-0a1f-account-create-update-f6ks2"] Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.083951 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-0a1f-account-create-update-f6ks2"] Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.088264 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.092604 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.096867 4751 scope.go:117] "RemoveContainer" containerID="2845974abbc25e68928a72daeb08093bf2536ab0bc6998e59ff8fa1ec52eba91" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.118166 4751 scope.go:117] "RemoveContainer" containerID="d6c9a2d485d12adf1118656837947a9c97a35defd0392ad64dca5800e77a1603" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.133561 4751 scope.go:117] "RemoveContainer" containerID="93a39ee11e77c8d1f29d5bc7f4d914ab63eb4c519c60efab540473556ec26c65" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.152810 4751 scope.go:117] "RemoveContainer" containerID="93a39ee11e77c8d1f29d5bc7f4d914ab63eb4c519c60efab540473556ec26c65" Feb 27 16:50:50 crc kubenswrapper[4751]: E0227 16:50:50.153364 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93a39ee11e77c8d1f29d5bc7f4d914ab63eb4c519c60efab540473556ec26c65\": container with ID starting with 93a39ee11e77c8d1f29d5bc7f4d914ab63eb4c519c60efab540473556ec26c65 not found: ID does not exist" containerID="93a39ee11e77c8d1f29d5bc7f4d914ab63eb4c519c60efab540473556ec26c65" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.153391 4751 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"93a39ee11e77c8d1f29d5bc7f4d914ab63eb4c519c60efab540473556ec26c65"} err="failed to get container status \"93a39ee11e77c8d1f29d5bc7f4d914ab63eb4c519c60efab540473556ec26c65\": rpc error: code = NotFound desc = could not find container \"93a39ee11e77c8d1f29d5bc7f4d914ab63eb4c519c60efab540473556ec26c65\": container with ID starting with 93a39ee11e77c8d1f29d5bc7f4d914ab63eb4c519c60efab540473556ec26c65 not found: ID does not exist" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.153434 4751 scope.go:117] "RemoveContainer" containerID="a91412d1338cfce1b6aed60bd52a679afa01513653b047734294840a9a916ff5" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.173972 4751 scope.go:117] "RemoveContainer" containerID="7093fc3fe4d41f6bca93c56cdb9de8d375834320491b81f1d7f637c564ea6641" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.210528 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7555b92d-c801-4da2-8d2e-78fa39c892d2-combined-ca-bundle\") pod \"7555b92d-c801-4da2-8d2e-78fa39c892d2\" (UID: \"7555b92d-c801-4da2-8d2e-78fa39c892d2\") " Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.211043 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7dg54\" (UniqueName: \"kubernetes.io/projected/ddea1ab1-d19b-42f5-833a-6578608d54cc-kube-api-access-7dg54\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.211060 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ddea1ab1-d19b-42f5-833a-6578608d54cc-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.216567 4751 scope.go:117] "RemoveContainer" containerID="9fd5df7074b1fc7b9bf2a447c5d88215370bd4201e24afb0b45b856f50e14328" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.230428 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7555b92d-c801-4da2-8d2e-78fa39c892d2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7555b92d-c801-4da2-8d2e-78fa39c892d2" (UID: "7555b92d-c801-4da2-8d2e-78fa39c892d2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.256625 4751 scope.go:117] "RemoveContainer" containerID="66ec49a151bde81e12512fb05eabd11d784e82af2fa19e9c977a0f218bb55c6d" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.313382 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7555b92d-c801-4da2-8d2e-78fa39c892d2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.315806 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-hvcjf" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.415158 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7dc9beed-8444-4389-8859-234af0090157-operator-scripts\") pod \"7dc9beed-8444-4389-8859-234af0090157\" (UID: \"7dc9beed-8444-4389-8859-234af0090157\") " Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.415264 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ftfpv\" (UniqueName: \"kubernetes.io/projected/7dc9beed-8444-4389-8859-234af0090157-kube-api-access-ftfpv\") pod \"7dc9beed-8444-4389-8859-234af0090157\" (UID: \"7dc9beed-8444-4389-8859-234af0090157\") " Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.416559 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7dc9beed-8444-4389-8859-234af0090157-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7dc9beed-8444-4389-8859-234af0090157" (UID: "7dc9beed-8444-4389-8859-234af0090157"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.419231 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7dc9beed-8444-4389-8859-234af0090157-kube-api-access-ftfpv" (OuterVolumeSpecName: "kube-api-access-ftfpv") pod "7dc9beed-8444-4389-8859-234af0090157" (UID: "7dc9beed-8444-4389-8859-234af0090157"). InnerVolumeSpecName "kube-api-access-ftfpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.517012 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7dc9beed-8444-4389-8859-234af0090157-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.517045 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ftfpv\" (UniqueName: \"kubernetes.io/projected/7dc9beed-8444-4389-8859-234af0090157-kube-api-access-ftfpv\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.539968 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a8f14c4-f8bc-4247-b2a2-72aa4801adfa" path="/var/lib/kubelet/pods/1a8f14c4-f8bc-4247-b2a2-72aa4801adfa/volumes" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.543114 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27f559b3-2c7d-4567-b836-702db66d74ae" path="/var/lib/kubelet/pods/27f559b3-2c7d-4567-b836-702db66d74ae/volumes" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.544420 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ad24b50-556b-4799-a598-b7618c1664fd" path="/var/lib/kubelet/pods/2ad24b50-556b-4799-a598-b7618c1664fd/volumes" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.545370 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36495e7a-b8f8-4d54-a504-e92bb6211327" path="/var/lib/kubelet/pods/36495e7a-b8f8-4d54-a504-e92bb6211327/volumes" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.546896 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c3834ac-6796-485b-9dec-e45cebf976df" path="/var/lib/kubelet/pods/3c3834ac-6796-485b-9dec-e45cebf976df/volumes" Feb 27 16:50:50 crc 
kubenswrapper[4751]: I0227 16:50:50.548028 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c5e58eb-31a4-4253-8cb9-a9486bb2d955" path="/var/lib/kubelet/pods/3c5e58eb-31a4-4253-8cb9-a9486bb2d955/volumes" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.549509 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="465988d0-be74-4295-bb31-4265148803e8" path="/var/lib/kubelet/pods/465988d0-be74-4295-bb31-4265148803e8/volumes" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.550101 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa" path="/var/lib/kubelet/pods/54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa/volumes" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.550858 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68bcf8b3-a271-47f0-9815-17cd3fdaec3e" path="/var/lib/kubelet/pods/68bcf8b3-a271-47f0-9815-17cd3fdaec3e/volumes" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.551627 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c4060a4-e264-4a4a-90ea-4a270cc50940" path="/var/lib/kubelet/pods/6c4060a4-e264-4a4a-90ea-4a270cc50940/volumes" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.552729 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b959a608-80f8-43f4-81a4-203b9a27467d" path="/var/lib/kubelet/pods/b959a608-80f8-43f4-81a4-203b9a27467d/volumes" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.553703 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7fb2588-cb2f-4495-ab6d-4f6aef939caf" path="/var/lib/kubelet/pods/d7fb2588-cb2f-4495-ab6d-4f6aef939caf/volumes" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.554565 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ddea1ab1-d19b-42f5-833a-6578608d54cc" path="/var/lib/kubelet/pods/ddea1ab1-d19b-42f5-833a-6578608d54cc/volumes" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.554907 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef465c53-5add-41ff-9fcc-00e714bc2bc0" path="/var/lib/kubelet/pods/ef465c53-5add-41ff-9fcc-00e714bc2bc0/volumes" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.555553 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef932397-22e9-4d46-90e3-57076299d4cf" path="/var/lib/kubelet/pods/ef932397-22e9-4d46-90e3-57076299d4cf/volumes" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.556792 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9ca6eb2-820e-49ea-80ca-bd0e352d4243" path="/var/lib/kubelet/pods/f9ca6eb2-820e-49ea-80ca-bd0e352d4243/volumes" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.557522 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.557551 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.765465 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.925020 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/253a763c-21da-4224-91a2-e3bdc6eca0e9-combined-ca-bundle\") pod \"253a763c-21da-4224-91a2-e3bdc6eca0e9\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.925087 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"253a763c-21da-4224-91a2-e3bdc6eca0e9\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.925117 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/253a763c-21da-4224-91a2-e3bdc6eca0e9-config-data-generated\") pod \"253a763c-21da-4224-91a2-e3bdc6eca0e9\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.925189 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/253a763c-21da-4224-91a2-e3bdc6eca0e9-kolla-config\") pod \"253a763c-21da-4224-91a2-e3bdc6eca0e9\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.925277 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/253a763c-21da-4224-91a2-e3bdc6eca0e9-operator-scripts\") pod \"253a763c-21da-4224-91a2-e3bdc6eca0e9\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.925311 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qz55r\" (UniqueName: \"kubernetes.io/projected/253a763c-21da-4224-91a2-e3bdc6eca0e9-kube-api-access-qz55r\") pod \"253a763c-21da-4224-91a2-e3bdc6eca0e9\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.925333 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/253a763c-21da-4224-91a2-e3bdc6eca0e9-galera-tls-certs\") pod \"253a763c-21da-4224-91a2-e3bdc6eca0e9\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.925382 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/253a763c-21da-4224-91a2-e3bdc6eca0e9-config-data-default\") pod \"253a763c-21da-4224-91a2-e3bdc6eca0e9\" (UID: \"253a763c-21da-4224-91a2-e3bdc6eca0e9\") " Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.926127 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/253a763c-21da-4224-91a2-e3bdc6eca0e9-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "253a763c-21da-4224-91a2-e3bdc6eca0e9" (UID: "253a763c-21da-4224-91a2-e3bdc6eca0e9"). InnerVolumeSpecName "kolla-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.926648 4751 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/253a763c-21da-4224-91a2-e3bdc6eca0e9-kolla-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:50 crc kubenswrapper[4751]: E0227 16:50:50.926715 4751 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Feb 27 16:50:50 crc kubenswrapper[4751]: E0227 16:50:50.926765 4751 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-config-data podName:51a81c6a-6814-412d-b77d-e741f1f74446 nodeName:}" failed. No retries permitted until 2026-02-27 16:50:58.92674706 +0000 UTC m=+1621.073761507 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-config-data") pod "rabbitmq-server-0" (UID: "51a81c6a-6814-412d-b77d-e741f1f74446") : configmap "rabbitmq-config-data" not found Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.927601 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/253a763c-21da-4224-91a2-e3bdc6eca0e9-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "253a763c-21da-4224-91a2-e3bdc6eca0e9" (UID: "253a763c-21da-4224-91a2-e3bdc6eca0e9"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.928758 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/253a763c-21da-4224-91a2-e3bdc6eca0e9-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "253a763c-21da-4224-91a2-e3bdc6eca0e9" (UID: "253a763c-21da-4224-91a2-e3bdc6eca0e9"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.929236 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/253a763c-21da-4224-91a2-e3bdc6eca0e9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "253a763c-21da-4224-91a2-e3bdc6eca0e9" (UID: "253a763c-21da-4224-91a2-e3bdc6eca0e9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.937649 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/253a763c-21da-4224-91a2-e3bdc6eca0e9-kube-api-access-qz55r" (OuterVolumeSpecName: "kube-api-access-qz55r") pod "253a763c-21da-4224-91a2-e3bdc6eca0e9" (UID: "253a763c-21da-4224-91a2-e3bdc6eca0e9"). InnerVolumeSpecName "kube-api-access-qz55r". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.942786 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "mysql-db") pod "253a763c-21da-4224-91a2-e3bdc6eca0e9" (UID: "253a763c-21da-4224-91a2-e3bdc6eca0e9"). InnerVolumeSpecName "local-storage08-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.952560 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/253a763c-21da-4224-91a2-e3bdc6eca0e9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "253a763c-21da-4224-91a2-e3bdc6eca0e9" (UID: "253a763c-21da-4224-91a2-e3bdc6eca0e9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:50 crc kubenswrapper[4751]: I0227 16:50:50.990809 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/253a763c-21da-4224-91a2-e3bdc6eca0e9-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "253a763c-21da-4224-91a2-e3bdc6eca0e9" (UID: "253a763c-21da-4224-91a2-e3bdc6eca0e9"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.002612 4751 generic.go:334] "Generic (PLEG): container finished" podID="253a763c-21da-4224-91a2-e3bdc6eca0e9" containerID="48b97b774a06d9dc312d0707fa7310a967a301a1c35ea672aebd0b0cb4aa8329" exitCode=0 Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.002743 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.002766 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"253a763c-21da-4224-91a2-e3bdc6eca0e9","Type":"ContainerDied","Data":"48b97b774a06d9dc312d0707fa7310a967a301a1c35ea672aebd0b0cb4aa8329"} Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.003153 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"253a763c-21da-4224-91a2-e3bdc6eca0e9","Type":"ContainerDied","Data":"1c6b87f8293f7739572020a0f496a0dba1b8cfc5e6e41dc3f1bac3f62bb80a2f"} Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.003195 4751 scope.go:117] "RemoveContainer" containerID="48b97b774a06d9dc312d0707fa7310a967a301a1c35ea672aebd0b0cb4aa8329" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.008643 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-hvcjf" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.007322 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-hvcjf" event={"ID":"7dc9beed-8444-4389-8859-234af0090157","Type":"ContainerDied","Data":"be8e4903b4c126bc47643cbadc02767416367166a4e0b07754ac034095ede4fb"} Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.013963 4751 generic.go:334] "Generic (PLEG): container finished" podID="cecf602c-dec2-40c6-922c-bf84b707b1b9" containerID="549fd5c24da2dfcd4fa0ba0f62c30ff6278b4f64c3189582850edb5093bc8b67" exitCode=0 Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.014037 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"cecf602c-dec2-40c6-922c-bf84b707b1b9","Type":"ContainerDied","Data":"549fd5c24da2dfcd4fa0ba0f62c30ff6278b4f64c3189582850edb5093bc8b67"} Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.028459 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qz55r\" (UniqueName: \"kubernetes.io/projected/253a763c-21da-4224-91a2-e3bdc6eca0e9-kube-api-access-qz55r\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.028484 4751 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/253a763c-21da-4224-91a2-e3bdc6eca0e9-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.028493 4751 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/253a763c-21da-4224-91a2-e3bdc6eca0e9-config-data-default\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.028501 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/253a763c-21da-4224-91a2-e3bdc6eca0e9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.028521 4751 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.028531 4751 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/253a763c-21da-4224-91a2-e3bdc6eca0e9-config-data-generated\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.029105 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/253a763c-21da-4224-91a2-e3bdc6eca0e9-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.037020 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-hvcjf"] Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.044530 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-hvcjf"] Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.047351 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.057867 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/keystone-6db7c8cdbf-x9xf8" 
podUID="0cf1e239-243c-4f96-abb6-c3fb850e98e1" containerName="keystone-api" probeResult="failure" output="Get \"https://10.217.0.158:5000/v3\": read tcp 10.217.0.2:43870->10.217.0.158:5000: read: connection reset by peer" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.060862 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-galera-0"] Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.060877 4751 scope.go:117] "RemoveContainer" containerID="a46f78c9165049b07436a4eb4889db7669b3edc8aafd3369db0aafc70fee6019" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.065249 4751 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.082985 4751 scope.go:117] "RemoveContainer" containerID="48b97b774a06d9dc312d0707fa7310a967a301a1c35ea672aebd0b0cb4aa8329" Feb 27 16:50:51 crc kubenswrapper[4751]: E0227 16:50:51.083830 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48b97b774a06d9dc312d0707fa7310a967a301a1c35ea672aebd0b0cb4aa8329\": container with ID starting with 48b97b774a06d9dc312d0707fa7310a967a301a1c35ea672aebd0b0cb4aa8329 not found: ID does not exist" containerID="48b97b774a06d9dc312d0707fa7310a967a301a1c35ea672aebd0b0cb4aa8329" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.083859 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48b97b774a06d9dc312d0707fa7310a967a301a1c35ea672aebd0b0cb4aa8329"} err="failed to get container status \"48b97b774a06d9dc312d0707fa7310a967a301a1c35ea672aebd0b0cb4aa8329\": rpc error: code = NotFound desc = could not find container \"48b97b774a06d9dc312d0707fa7310a967a301a1c35ea672aebd0b0cb4aa8329\": container with ID starting with 48b97b774a06d9dc312d0707fa7310a967a301a1c35ea672aebd0b0cb4aa8329 not found: ID does not exist" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.083880 4751 scope.go:117] "RemoveContainer" containerID="a46f78c9165049b07436a4eb4889db7669b3edc8aafd3369db0aafc70fee6019" Feb 27 16:50:51 crc kubenswrapper[4751]: E0227 16:50:51.084258 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a46f78c9165049b07436a4eb4889db7669b3edc8aafd3369db0aafc70fee6019\": container with ID starting with a46f78c9165049b07436a4eb4889db7669b3edc8aafd3369db0aafc70fee6019 not found: ID does not exist" containerID="a46f78c9165049b07436a4eb4889db7669b3edc8aafd3369db0aafc70fee6019" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.084294 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a46f78c9165049b07436a4eb4889db7669b3edc8aafd3369db0aafc70fee6019"} err="failed to get container status \"a46f78c9165049b07436a4eb4889db7669b3edc8aafd3369db0aafc70fee6019\": rpc error: code = NotFound desc = could not find container \"a46f78c9165049b07436a4eb4889db7669b3edc8aafd3369db0aafc70fee6019\": container with ID starting with a46f78c9165049b07436a4eb4889db7669b3edc8aafd3369db0aafc70fee6019 not found: ID does not exist" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.084323 4751 scope.go:117] "RemoveContainer" containerID="47acd381e9eb85a84aea0efaae97b586799511423dd5af51c19474800fbeefeb" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.130508 4751 reconciler_common.go:293] "Volume detached for 
volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.237573 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:50:51 crc kubenswrapper[4751]: E0227 16:50:51.337279 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="da16950bef4c8761589f066404058c19ec5943530e74d8711c9cc18e707be9f5" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Feb 27 16:50:51 crc kubenswrapper[4751]: E0227 16:50:51.339355 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="da16950bef4c8761589f066404058c19ec5943530e74d8711c9cc18e707be9f5" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Feb 27 16:50:51 crc kubenswrapper[4751]: E0227 16:50:51.340476 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="da16950bef4c8761589f066404058c19ec5943530e74d8711c9cc18e707be9f5" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Feb 27 16:50:51 crc kubenswrapper[4751]: E0227 16:50:51.340517 4751 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="f501d880-21be-44e3-b015-05b79e226279" containerName="nova-cell0-conductor-conductor" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.358787 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="51a81c6a-6814-412d-b77d-e741f1f74446" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.105:5671: connect: connection refused" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.436158 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5hxs5\" (UniqueName: \"kubernetes.io/projected/cecf602c-dec2-40c6-922c-bf84b707b1b9-kube-api-access-5hxs5\") pod \"cecf602c-dec2-40c6-922c-bf84b707b1b9\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.436218 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"cecf602c-dec2-40c6-922c-bf84b707b1b9\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.437381 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-tls\") pod \"cecf602c-dec2-40c6-922c-bf84b707b1b9\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.437993 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-server-conf\") pod \"cecf602c-dec2-40c6-922c-bf84b707b1b9\" (UID: 
\"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.438066 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cecf602c-dec2-40c6-922c-bf84b707b1b9-pod-info\") pod \"cecf602c-dec2-40c6-922c-bf84b707b1b9\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.438178 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-plugins-conf\") pod \"cecf602c-dec2-40c6-922c-bf84b707b1b9\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.438239 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-plugins\") pod \"cecf602c-dec2-40c6-922c-bf84b707b1b9\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.438342 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cecf602c-dec2-40c6-922c-bf84b707b1b9-erlang-cookie-secret\") pod \"cecf602c-dec2-40c6-922c-bf84b707b1b9\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.438462 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-erlang-cookie\") pod \"cecf602c-dec2-40c6-922c-bf84b707b1b9\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.438703 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-confd\") pod \"cecf602c-dec2-40c6-922c-bf84b707b1b9\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.438856 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-config-data\") pod \"cecf602c-dec2-40c6-922c-bf84b707b1b9\" (UID: \"cecf602c-dec2-40c6-922c-bf84b707b1b9\") " Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.439000 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "cecf602c-dec2-40c6-922c-bf84b707b1b9" (UID: "cecf602c-dec2-40c6-922c-bf84b707b1b9"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.439062 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "cecf602c-dec2-40c6-922c-bf84b707b1b9" (UID: "cecf602c-dec2-40c6-922c-bf84b707b1b9"). InnerVolumeSpecName "rabbitmq-plugins". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.439071 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "cecf602c-dec2-40c6-922c-bf84b707b1b9" (UID: "cecf602c-dec2-40c6-922c-bf84b707b1b9"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.439831 4751 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-plugins-conf\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.439876 4751 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.439894 4751 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.462471 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/cecf602c-dec2-40c6-922c-bf84b707b1b9-pod-info" (OuterVolumeSpecName: "pod-info") pod "cecf602c-dec2-40c6-922c-bf84b707b1b9" (UID: "cecf602c-dec2-40c6-922c-bf84b707b1b9"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.462520 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cecf602c-dec2-40c6-922c-bf84b707b1b9-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "cecf602c-dec2-40c6-922c-bf84b707b1b9" (UID: "cecf602c-dec2-40c6-922c-bf84b707b1b9"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.462763 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cecf602c-dec2-40c6-922c-bf84b707b1b9-kube-api-access-5hxs5" (OuterVolumeSpecName: "kube-api-access-5hxs5") pod "cecf602c-dec2-40c6-922c-bf84b707b1b9" (UID: "cecf602c-dec2-40c6-922c-bf84b707b1b9"). InnerVolumeSpecName "kube-api-access-5hxs5". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.462959 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "cecf602c-dec2-40c6-922c-bf84b707b1b9" (UID: "cecf602c-dec2-40c6-922c-bf84b707b1b9"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.463645 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "cecf602c-dec2-40c6-922c-bf84b707b1b9" (UID: "cecf602c-dec2-40c6-922c-bf84b707b1b9"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.490603 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-config-data" (OuterVolumeSpecName: "config-data") pod "cecf602c-dec2-40c6-922c-bf84b707b1b9" (UID: "cecf602c-dec2-40c6-922c-bf84b707b1b9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.501412 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-server-conf" (OuterVolumeSpecName: "server-conf") pod "cecf602c-dec2-40c6-922c-bf84b707b1b9" (UID: "cecf602c-dec2-40c6-922c-bf84b707b1b9"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:51 crc kubenswrapper[4751]: E0227 16:50:51.523188 4751 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< Feb 27 16:50:51 crc kubenswrapper[4751]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2026-02-27T16:50:44Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Feb 27 16:50:51 crc kubenswrapper[4751]: /etc/init.d/functions: line 589: 400 Alarm clock "$@" Feb 27 16:50:51 crc kubenswrapper[4751]: > execCommand=["/usr/share/ovn/scripts/ovn-ctl","stop_controller"] containerName="ovn-controller" pod="openstack/ovn-controller-gdjfm" message=< Feb 27 16:50:51 crc kubenswrapper[4751]: Exiting ovn-controller (1) [FAILED] Feb 27 16:50:51 crc kubenswrapper[4751]: Killing ovn-controller (1) [ OK ] Feb 27 16:50:51 crc kubenswrapper[4751]: 2026-02-27T16:50:44Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Feb 27 16:50:51 crc kubenswrapper[4751]: /etc/init.d/functions: line 589: 400 Alarm clock "$@" Feb 27 16:50:51 crc kubenswrapper[4751]: > Feb 27 16:50:51 crc kubenswrapper[4751]: E0227 16:50:51.523215 4751 kuberuntime_container.go:691] "PreStop hook failed" err=< Feb 27 16:50:51 crc kubenswrapper[4751]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2026-02-27T16:50:44Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Feb 27 16:50:51 crc kubenswrapper[4751]: /etc/init.d/functions: line 589: 400 Alarm clock "$@" Feb 27 16:50:51 crc kubenswrapper[4751]: > pod="openstack/ovn-controller-gdjfm" podUID="3f29e0f7-8556-4570-a115-1d1ee089479c" containerName="ovn-controller" containerID="cri-o://5dcd2bdb2e6d0bb709a42589f714ed014817912f72ec631784e18f15031c3168" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.523247 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-gdjfm" podUID="3f29e0f7-8556-4570-a115-1d1ee089479c" containerName="ovn-controller" containerID="cri-o://5dcd2bdb2e6d0bb709a42589f714ed014817912f72ec631784e18f15031c3168" gracePeriod=22 Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.544483 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5hxs5\" (UniqueName: \"kubernetes.io/projected/cecf602c-dec2-40c6-922c-bf84b707b1b9-kube-api-access-5hxs5\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.544521 4751 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Feb 27 16:50:51 crc 
kubenswrapper[4751]: I0227 16:50:51.544533 4751 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.544542 4751 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-server-conf\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.544552 4751 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cecf602c-dec2-40c6-922c-bf84b707b1b9-pod-info\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.544561 4751 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cecf602c-dec2-40c6-922c-bf84b707b1b9-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.544569 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cecf602c-dec2-40c6-922c-bf84b707b1b9-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.561042 4751 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.564576 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "cecf602c-dec2-40c6-922c-bf84b707b1b9" (UID: "cecf602c-dec2-40c6-922c-bf84b707b1b9"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.646543 4751 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cecf602c-dec2-40c6-922c-bf84b707b1b9-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.646589 4751 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.685917 4751 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-649c97d5df-x4tkf" podUID="16754588-ca23-484b-b8e8-21bc94c640f3" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.162:9696/\": dial tcp 10.217.0.162:9696: connect: connection refused" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.763021 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.857946 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-27twd\" (UniqueName: \"kubernetes.io/projected/0cf1e239-243c-4f96-abb6-c3fb850e98e1-kube-api-access-27twd\") pod \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.858004 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-fernet-keys\") pod \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.858030 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-config-data\") pod \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.858064 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-public-tls-certs\") pod \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.858100 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-scripts\") pod \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.858896 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-internal-tls-certs\") pod \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.858986 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-credential-keys\") pod \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.859042 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-combined-ca-bundle\") pod \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\" (UID: \"0cf1e239-243c-4f96-abb6-c3fb850e98e1\") " Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.884250 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "0cf1e239-243c-4f96-abb6-c3fb850e98e1" (UID: "0cf1e239-243c-4f96-abb6-c3fb850e98e1"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.884270 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "0cf1e239-243c-4f96-abb6-c3fb850e98e1" (UID: "0cf1e239-243c-4f96-abb6-c3fb850e98e1"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.884328 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-scripts" (OuterVolumeSpecName: "scripts") pod "0cf1e239-243c-4f96-abb6-c3fb850e98e1" (UID: "0cf1e239-243c-4f96-abb6-c3fb850e98e1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.884677 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0cf1e239-243c-4f96-abb6-c3fb850e98e1-kube-api-access-27twd" (OuterVolumeSpecName: "kube-api-access-27twd") pod "0cf1e239-243c-4f96-abb6-c3fb850e98e1" (UID: "0cf1e239-243c-4f96-abb6-c3fb850e98e1"). InnerVolumeSpecName "kube-api-access-27twd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.888266 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0cf1e239-243c-4f96-abb6-c3fb850e98e1" (UID: "0cf1e239-243c-4f96-abb6-c3fb850e98e1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.891598 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-config-data" (OuterVolumeSpecName: "config-data") pod "0cf1e239-243c-4f96-abb6-c3fb850e98e1" (UID: "0cf1e239-243c-4f96-abb6-c3fb850e98e1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.916688 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "0cf1e239-243c-4f96-abb6-c3fb850e98e1" (UID: "0cf1e239-243c-4f96-abb6-c3fb850e98e1"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.919327 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "0cf1e239-243c-4f96-abb6-c3fb850e98e1" (UID: "0cf1e239-243c-4f96-abb6-c3fb850e98e1"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.960928 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-27twd\" (UniqueName: \"kubernetes.io/projected/0cf1e239-243c-4f96-abb6-c3fb850e98e1-kube-api-access-27twd\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.960951 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.960960 4751 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-fernet-keys\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.960970 4751 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.961039 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.961048 4751 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.961056 4751 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-credential-keys\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:51 crc kubenswrapper[4751]: I0227 16:50:51.961063 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cf1e239-243c-4f96-abb6-c3fb850e98e1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.062137 4751 generic.go:334] "Generic (PLEG): container finished" podID="0cf1e239-243c-4f96-abb6-c3fb850e98e1" containerID="432cd197f9cae788ffeb84426732d6a096e82727553ed5b6ea7143ce3bd3614c" exitCode=0 Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.062222 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6db7c8cdbf-x9xf8" event={"ID":"0cf1e239-243c-4f96-abb6-c3fb850e98e1","Type":"ContainerDied","Data":"432cd197f9cae788ffeb84426732d6a096e82727553ed5b6ea7143ce3bd3614c"} Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.062251 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6db7c8cdbf-x9xf8" event={"ID":"0cf1e239-243c-4f96-abb6-c3fb850e98e1","Type":"ContainerDied","Data":"8e3d2c303580fdf702bd6c9ffc39ae3efede00b5bcb96918b39337fb39f02bce"} Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.062286 4751 scope.go:117] "RemoveContainer" containerID="432cd197f9cae788ffeb84426732d6a096e82727553ed5b6ea7143ce3bd3614c" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.062413 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6db7c8cdbf-x9xf8" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.075597 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.078882 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-gdjfm_3f29e0f7-8556-4570-a115-1d1ee089479c/ovn-controller/0.log" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.078937 4751 generic.go:334] "Generic (PLEG): container finished" podID="3f29e0f7-8556-4570-a115-1d1ee089479c" containerID="5dcd2bdb2e6d0bb709a42589f714ed014817912f72ec631784e18f15031c3168" exitCode=139 Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.079004 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-gdjfm" event={"ID":"3f29e0f7-8556-4570-a115-1d1ee089479c","Type":"ContainerDied","Data":"5dcd2bdb2e6d0bb709a42589f714ed014817912f72ec631784e18f15031c3168"} Feb 27 16:50:52 crc kubenswrapper[4751]: E0227 16:50:52.085459 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5dcd2bdb2e6d0bb709a42589f714ed014817912f72ec631784e18f15031c3168 is running failed: container process not found" containerID="5dcd2bdb2e6d0bb709a42589f714ed014817912f72ec631784e18f15031c3168" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.085815 4751 generic.go:334] "Generic (PLEG): container finished" podID="51a81c6a-6814-412d-b77d-e741f1f74446" containerID="56a09490ed1f916c96c436deb77a88d652e14cee4afd925713481445d7f435ab" exitCode=0 Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.085878 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"51a81c6a-6814-412d-b77d-e741f1f74446","Type":"ContainerDied","Data":"56a09490ed1f916c96c436deb77a88d652e14cee4afd925713481445d7f435ab"} Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.085964 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Feb 27 16:50:52 crc kubenswrapper[4751]: E0227 16:50:52.086829 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5dcd2bdb2e6d0bb709a42589f714ed014817912f72ec631784e18f15031c3168 is running failed: container process not found" containerID="5dcd2bdb2e6d0bb709a42589f714ed014817912f72ec631784e18f15031c3168" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Feb 27 16:50:52 crc kubenswrapper[4751]: E0227 16:50:52.087172 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5dcd2bdb2e6d0bb709a42589f714ed014817912f72ec631784e18f15031c3168 is running failed: container process not found" containerID="5dcd2bdb2e6d0bb709a42589f714ed014817912f72ec631784e18f15031c3168" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Feb 27 16:50:52 crc kubenswrapper[4751]: E0227 16:50:52.087202 4751 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5dcd2bdb2e6d0bb709a42589f714ed014817912f72ec631784e18f15031c3168 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-gdjfm" podUID="3f29e0f7-8556-4570-a115-1d1ee089479c" containerName="ovn-controller" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.093980 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"cecf602c-dec2-40c6-922c-bf84b707b1b9","Type":"ContainerDied","Data":"068368f05d8224f09170f56353afd8659611b4c1686490f92f6144bab5110b28"} Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.094071 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.101883 4751 scope.go:117] "RemoveContainer" containerID="432cd197f9cae788ffeb84426732d6a096e82727553ed5b6ea7143ce3bd3614c" Feb 27 16:50:52 crc kubenswrapper[4751]: E0227 16:50:52.104804 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"432cd197f9cae788ffeb84426732d6a096e82727553ed5b6ea7143ce3bd3614c\": container with ID starting with 432cd197f9cae788ffeb84426732d6a096e82727553ed5b6ea7143ce3bd3614c not found: ID does not exist" containerID="432cd197f9cae788ffeb84426732d6a096e82727553ed5b6ea7143ce3bd3614c" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.105003 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"432cd197f9cae788ffeb84426732d6a096e82727553ed5b6ea7143ce3bd3614c"} err="failed to get container status \"432cd197f9cae788ffeb84426732d6a096e82727553ed5b6ea7143ce3bd3614c\": rpc error: code = NotFound desc = could not find container \"432cd197f9cae788ffeb84426732d6a096e82727553ed5b6ea7143ce3bd3614c\": container with ID starting with 432cd197f9cae788ffeb84426732d6a096e82727553ed5b6ea7143ce3bd3614c not found: ID does not exist" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.105028 4751 scope.go:117] "RemoveContainer" containerID="56a09490ed1f916c96c436deb77a88d652e14cee4afd925713481445d7f435ab" Feb 27 16:50:52 crc kubenswrapper[4751]: E0227 16:50:52.134569 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" containerID="f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Feb 27 16:50:52 crc kubenswrapper[4751]: E0227 16:50:52.136170 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" containerID="f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Feb 27 16:50:52 crc kubenswrapper[4751]: E0227 16:50:52.136654 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" containerID="f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Feb 27 16:50:52 crc kubenswrapper[4751]: E0227 16:50:52.136690 4751 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-frvvc" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerName="ovsdb-server" Feb 27 16:50:52 crc kubenswrapper[4751]: E0227 16:50:52.137919 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec 
PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.146554 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-6db7c8cdbf-x9xf8"] Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.156730 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-6db7c8cdbf-x9xf8"] Feb 27 16:50:52 crc kubenswrapper[4751]: E0227 16:50:52.159792 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.162835 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sk5bp\" (UniqueName: \"kubernetes.io/projected/51a81c6a-6814-412d-b77d-e741f1f74446-kube-api-access-sk5bp\") pod \"51a81c6a-6814-412d-b77d-e741f1f74446\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.162879 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-plugins\") pod \"51a81c6a-6814-412d-b77d-e741f1f74446\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.162942 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-server-conf\") pod \"51a81c6a-6814-412d-b77d-e741f1f74446\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.162973 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-tls\") pod \"51a81c6a-6814-412d-b77d-e741f1f74446\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.163002 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-erlang-cookie\") pod \"51a81c6a-6814-412d-b77d-e741f1f74446\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.163063 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/51a81c6a-6814-412d-b77d-e741f1f74446-erlang-cookie-secret\") pod \"51a81c6a-6814-412d-b77d-e741f1f74446\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.163091 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-config-data\") pod \"51a81c6a-6814-412d-b77d-e741f1f74446\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.163158 4751 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-plugins-conf\") pod \"51a81c6a-6814-412d-b77d-e741f1f74446\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.163226 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"51a81c6a-6814-412d-b77d-e741f1f74446\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.163273 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/51a81c6a-6814-412d-b77d-e741f1f74446-pod-info\") pod \"51a81c6a-6814-412d-b77d-e741f1f74446\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.163319 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-confd\") pod \"51a81c6a-6814-412d-b77d-e741f1f74446\" (UID: \"51a81c6a-6814-412d-b77d-e741f1f74446\") " Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.164345 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "51a81c6a-6814-412d-b77d-e741f1f74446" (UID: "51a81c6a-6814-412d-b77d-e741f1f74446"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.168604 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51a81c6a-6814-412d-b77d-e741f1f74446-kube-api-access-sk5bp" (OuterVolumeSpecName: "kube-api-access-sk5bp") pod "51a81c6a-6814-412d-b77d-e741f1f74446" (UID: "51a81c6a-6814-412d-b77d-e741f1f74446"). InnerVolumeSpecName "kube-api-access-sk5bp". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.168710 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.169140 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "51a81c6a-6814-412d-b77d-e741f1f74446" (UID: "51a81c6a-6814-412d-b77d-e741f1f74446"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.169559 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "51a81c6a-6814-412d-b77d-e741f1f74446" (UID: "51a81c6a-6814-412d-b77d-e741f1f74446"). InnerVolumeSpecName "plugins-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.172706 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.175428 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "51a81c6a-6814-412d-b77d-e741f1f74446" (UID: "51a81c6a-6814-412d-b77d-e741f1f74446"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.176514 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "persistence") pod "51a81c6a-6814-412d-b77d-e741f1f74446" (UID: "51a81c6a-6814-412d-b77d-e741f1f74446"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.177296 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51a81c6a-6814-412d-b77d-e741f1f74446-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "51a81c6a-6814-412d-b77d-e741f1f74446" (UID: "51a81c6a-6814-412d-b77d-e741f1f74446"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:52 crc kubenswrapper[4751]: E0227 16:50:52.178546 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Feb 27 16:50:52 crc kubenswrapper[4751]: E0227 16:50:52.178621 4751 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-frvvc" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerName="ovs-vswitchd" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.193544 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/51a81c6a-6814-412d-b77d-e741f1f74446-pod-info" (OuterVolumeSpecName: "pod-info") pod "51a81c6a-6814-412d-b77d-e741f1f74446" (UID: "51a81c6a-6814-412d-b77d-e741f1f74446"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.197431 4751 scope.go:117] "RemoveContainer" containerID="4c6716148a74ea8af28ec00f8d9776e6a9149b4724fe5543af6b7a72f9411e92" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.208772 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-config-data" (OuterVolumeSpecName: "config-data") pod "51a81c6a-6814-412d-b77d-e741f1f74446" (UID: "51a81c6a-6814-412d-b77d-e741f1f74446"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.221070 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-server-conf" (OuterVolumeSpecName: "server-conf") pod "51a81c6a-6814-412d-b77d-e741f1f74446" (UID: "51a81c6a-6814-412d-b77d-e741f1f74446"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.226778 4751 scope.go:117] "RemoveContainer" containerID="549fd5c24da2dfcd4fa0ba0f62c30ff6278b4f64c3189582850edb5093bc8b67" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.258451 4751 scope.go:117] "RemoveContainer" containerID="90d048165126f4b62e9010d52adea94a9bf9162b44553551cc7d28985890a0a2" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.264859 4751 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-server-conf\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.264993 4751 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.265047 4751 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.265095 4751 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/51a81c6a-6814-412d-b77d-e741f1f74446-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.265142 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.265209 4751 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/51a81c6a-6814-412d-b77d-e741f1f74446-plugins-conf\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.265272 4751 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.265321 4751 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/51a81c6a-6814-412d-b77d-e741f1f74446-pod-info\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.265370 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sk5bp\" (UniqueName: \"kubernetes.io/projected/51a81c6a-6814-412d-b77d-e741f1f74446-kube-api-access-sk5bp\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.265433 4751 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:52 crc 
kubenswrapper[4751]: I0227 16:50:52.273502 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "51a81c6a-6814-412d-b77d-e741f1f74446" (UID: "51a81c6a-6814-412d-b77d-e741f1f74446"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.281141 4751 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.324366 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-gdjfm_3f29e0f7-8556-4570-a115-1d1ee089479c/ovn-controller/0.log" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.324457 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-gdjfm" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.366187 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3f29e0f7-8556-4570-a115-1d1ee089479c-var-run\") pod \"3f29e0f7-8556-4570-a115-1d1ee089479c\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.366292 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mrqct\" (UniqueName: \"kubernetes.io/projected/3f29e0f7-8556-4570-a115-1d1ee089479c-kube-api-access-mrqct\") pod \"3f29e0f7-8556-4570-a115-1d1ee089479c\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.366359 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f29e0f7-8556-4570-a115-1d1ee089479c-combined-ca-bundle\") pod \"3f29e0f7-8556-4570-a115-1d1ee089479c\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.366390 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3f29e0f7-8556-4570-a115-1d1ee089479c-var-run-ovn\") pod \"3f29e0f7-8556-4570-a115-1d1ee089479c\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.366681 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3f29e0f7-8556-4570-a115-1d1ee089479c-var-log-ovn\") pod \"3f29e0f7-8556-4570-a115-1d1ee089479c\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.366681 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f29e0f7-8556-4570-a115-1d1ee089479c-var-run" (OuterVolumeSpecName: "var-run") pod "3f29e0f7-8556-4570-a115-1d1ee089479c" (UID: "3f29e0f7-8556-4570-a115-1d1ee089479c"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.366736 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f29e0f7-8556-4570-a115-1d1ee089479c-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "3f29e0f7-8556-4570-a115-1d1ee089479c" (UID: "3f29e0f7-8556-4570-a115-1d1ee089479c"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.366772 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f29e0f7-8556-4570-a115-1d1ee089479c-ovn-controller-tls-certs\") pod \"3f29e0f7-8556-4570-a115-1d1ee089479c\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.366834 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f29e0f7-8556-4570-a115-1d1ee089479c-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "3f29e0f7-8556-4570-a115-1d1ee089479c" (UID: "3f29e0f7-8556-4570-a115-1d1ee089479c"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.366894 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f29e0f7-8556-4570-a115-1d1ee089479c-scripts\") pod \"3f29e0f7-8556-4570-a115-1d1ee089479c\" (UID: \"3f29e0f7-8556-4570-a115-1d1ee089479c\") " Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.367333 4751 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3f29e0f7-8556-4570-a115-1d1ee089479c-var-run\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.367357 4751 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3f29e0f7-8556-4570-a115-1d1ee089479c-var-run-ovn\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.367372 4751 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3f29e0f7-8556-4570-a115-1d1ee089479c-var-log-ovn\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.367387 4751 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.367422 4751 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/51a81c6a-6814-412d-b77d-e741f1f74446-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.369570 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f29e0f7-8556-4570-a115-1d1ee089479c-scripts" (OuterVolumeSpecName: "scripts") pod "3f29e0f7-8556-4570-a115-1d1ee089479c" (UID: "3f29e0f7-8556-4570-a115-1d1ee089479c"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.370189 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f29e0f7-8556-4570-a115-1d1ee089479c-kube-api-access-mrqct" (OuterVolumeSpecName: "kube-api-access-mrqct") pod "3f29e0f7-8556-4570-a115-1d1ee089479c" (UID: "3f29e0f7-8556-4570-a115-1d1ee089479c"). InnerVolumeSpecName "kube-api-access-mrqct". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.400232 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f29e0f7-8556-4570-a115-1d1ee089479c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3f29e0f7-8556-4570-a115-1d1ee089479c" (UID: "3f29e0f7-8556-4570-a115-1d1ee089479c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.431716 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.437474 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.461448 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f29e0f7-8556-4570-a115-1d1ee089479c-ovn-controller-tls-certs" (OuterVolumeSpecName: "ovn-controller-tls-certs") pod "3f29e0f7-8556-4570-a115-1d1ee089479c" (UID: "3f29e0f7-8556-4570-a115-1d1ee089479c"). InnerVolumeSpecName "ovn-controller-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.469376 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f29e0f7-8556-4570-a115-1d1ee089479c-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.469429 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mrqct\" (UniqueName: \"kubernetes.io/projected/3f29e0f7-8556-4570-a115-1d1ee089479c-kube-api-access-mrqct\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.469442 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f29e0f7-8556-4570-a115-1d1ee089479c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.469451 4751 reconciler_common.go:293] "Volume detached for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f29e0f7-8556-4570-a115-1d1ee089479c-ovn-controller-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.539373 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0cf1e239-243c-4f96-abb6-c3fb850e98e1" path="/var/lib/kubelet/pods/0cf1e239-243c-4f96-abb6-c3fb850e98e1/volumes" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.540166 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="253a763c-21da-4224-91a2-e3bdc6eca0e9" path="/var/lib/kubelet/pods/253a763c-21da-4224-91a2-e3bdc6eca0e9/volumes" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.541098 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51a81c6a-6814-412d-b77d-e741f1f74446" 
path="/var/lib/kubelet/pods/51a81c6a-6814-412d-b77d-e741f1f74446/volumes" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.542321 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7555b92d-c801-4da2-8d2e-78fa39c892d2" path="/var/lib/kubelet/pods/7555b92d-c801-4da2-8d2e-78fa39c892d2/volumes" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.542968 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7dc9beed-8444-4389-8859-234af0090157" path="/var/lib/kubelet/pods/7dc9beed-8444-4389-8859-234af0090157/volumes" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.554567 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cecf602c-dec2-40c6-922c-bf84b707b1b9" path="/var/lib/kubelet/pods/cecf602c-dec2-40c6-922c-bf84b707b1b9/volumes" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.598595 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.699815 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d8d9ed0-8606-47cb-a164-7e6bbac390cd-combined-ca-bundle\") pod \"2d8d9ed0-8606-47cb-a164-7e6bbac390cd\" (UID: \"2d8d9ed0-8606-47cb-a164-7e6bbac390cd\") " Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.699921 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f6zh4\" (UniqueName: \"kubernetes.io/projected/2d8d9ed0-8606-47cb-a164-7e6bbac390cd-kube-api-access-f6zh4\") pod \"2d8d9ed0-8606-47cb-a164-7e6bbac390cd\" (UID: \"2d8d9ed0-8606-47cb-a164-7e6bbac390cd\") " Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.699976 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d8d9ed0-8606-47cb-a164-7e6bbac390cd-config-data\") pod \"2d8d9ed0-8606-47cb-a164-7e6bbac390cd\" (UID: \"2d8d9ed0-8606-47cb-a164-7e6bbac390cd\") " Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.720694 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d8d9ed0-8606-47cb-a164-7e6bbac390cd-kube-api-access-f6zh4" (OuterVolumeSpecName: "kube-api-access-f6zh4") pod "2d8d9ed0-8606-47cb-a164-7e6bbac390cd" (UID: "2d8d9ed0-8606-47cb-a164-7e6bbac390cd"). InnerVolumeSpecName "kube-api-access-f6zh4". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.728889 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d8d9ed0-8606-47cb-a164-7e6bbac390cd-config-data" (OuterVolumeSpecName: "config-data") pod "2d8d9ed0-8606-47cb-a164-7e6bbac390cd" (UID: "2d8d9ed0-8606-47cb-a164-7e6bbac390cd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.733290 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d8d9ed0-8606-47cb-a164-7e6bbac390cd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2d8d9ed0-8606-47cb-a164-7e6bbac390cd" (UID: "2d8d9ed0-8606-47cb-a164-7e6bbac390cd"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.801995 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d8d9ed0-8606-47cb-a164-7e6bbac390cd-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.802030 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d8d9ed0-8606-47cb-a164-7e6bbac390cd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:52 crc kubenswrapper[4751]: I0227 16:50:52.802044 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f6zh4\" (UniqueName: \"kubernetes.io/projected/2d8d9ed0-8606-47cb-a164-7e6bbac390cd-kube-api-access-f6zh4\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.077319 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-d9bcd5f6c-zlj75" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.119569 4751 generic.go:334] "Generic (PLEG): container finished" podID="b4785321-8f3e-44cb-833c-0b78bc368cd9" containerID="a9ee4f7f4be2929eae47c5cf12c06d5e1590f223ebab7558c7fbef22ddb4ca6f" exitCode=0 Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.119667 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4785321-8f3e-44cb-833c-0b78bc368cd9","Type":"ContainerDied","Data":"a9ee4f7f4be2929eae47c5cf12c06d5e1590f223ebab7558c7fbef22ddb4ca6f"} Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.122377 4751 generic.go:334] "Generic (PLEG): container finished" podID="f501d880-21be-44e3-b015-05b79e226279" containerID="da16950bef4c8761589f066404058c19ec5943530e74d8711c9cc18e707be9f5" exitCode=0 Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.122509 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"f501d880-21be-44e3-b015-05b79e226279","Type":"ContainerDied","Data":"da16950bef4c8761589f066404058c19ec5943530e74d8711c9cc18e707be9f5"} Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.124652 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-gdjfm_3f29e0f7-8556-4570-a115-1d1ee089479c/ovn-controller/0.log" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.124698 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-gdjfm" event={"ID":"3f29e0f7-8556-4570-a115-1d1ee089479c","Type":"ContainerDied","Data":"1eb85dbb8f2524a4d5419fdd2893031b7e550048413c3dc29f369b1bd2104490"} Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.124720 4751 scope.go:117] "RemoveContainer" containerID="5dcd2bdb2e6d0bb709a42589f714ed014817912f72ec631784e18f15031c3168" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.124851 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-gdjfm" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.135358 4751 generic.go:334] "Generic (PLEG): container finished" podID="23d98e0b-8d21-4ad9-b3a4-716c1d221949" containerID="975963e810405e7a1f164ed08541517bb44532e23c9b968a1511ad894a22d948" exitCode=0 Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.135456 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"23d98e0b-8d21-4ad9-b3a4-716c1d221949","Type":"ContainerDied","Data":"975963e810405e7a1f164ed08541517bb44532e23c9b968a1511ad894a22d948"} Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.136991 4751 generic.go:334] "Generic (PLEG): container finished" podID="2d8d9ed0-8606-47cb-a164-7e6bbac390cd" containerID="2447590027e3c3985b0ff486fd0b56c2badc4c6132000ab0f491f2b1f773ddc6" exitCode=0 Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.137041 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"2d8d9ed0-8606-47cb-a164-7e6bbac390cd","Type":"ContainerDied","Data":"2447590027e3c3985b0ff486fd0b56c2badc4c6132000ab0f491f2b1f773ddc6"} Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.137064 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"2d8d9ed0-8606-47cb-a164-7e6bbac390cd","Type":"ContainerDied","Data":"acf26164004e25883c932d9f0da8e1b26f131f371308f2367b0539bbdddade95"} Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.137135 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.139514 4751 generic.go:334] "Generic (PLEG): container finished" podID="a0840d34-f0f3-4bfd-a33c-29cc1e268586" containerID="72fa8d6456780e0ea6b18871704b4dde8c21cb82793e3e3a0122013c628913fe" exitCode=0 Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.139556 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-d9bcd5f6c-zlj75" event={"ID":"a0840d34-f0f3-4bfd-a33c-29cc1e268586","Type":"ContainerDied","Data":"72fa8d6456780e0ea6b18871704b4dde8c21cb82793e3e3a0122013c628913fe"} Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.139581 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-d9bcd5f6c-zlj75" event={"ID":"a0840d34-f0f3-4bfd-a33c-29cc1e268586","Type":"ContainerDied","Data":"0751aa2cf43b9b59cd932572a646b4b5fd97dcd04a31d419cf30b1b84ab7296a"} Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.139755 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-d9bcd5f6c-zlj75" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.153784 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-gdjfm"] Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.163681 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-gdjfm"] Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.168940 4751 scope.go:117] "RemoveContainer" containerID="2447590027e3c3985b0ff486fd0b56c2badc4c6132000ab0f491f2b1f773ddc6" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.182803 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.187379 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.196219 4751 scope.go:117] "RemoveContainer" containerID="2447590027e3c3985b0ff486fd0b56c2badc4c6132000ab0f491f2b1f773ddc6" Feb 27 16:50:53 crc kubenswrapper[4751]: E0227 16:50:53.196839 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2447590027e3c3985b0ff486fd0b56c2badc4c6132000ab0f491f2b1f773ddc6\": container with ID starting with 2447590027e3c3985b0ff486fd0b56c2badc4c6132000ab0f491f2b1f773ddc6 not found: ID does not exist" containerID="2447590027e3c3985b0ff486fd0b56c2badc4c6132000ab0f491f2b1f773ddc6" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.196967 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2447590027e3c3985b0ff486fd0b56c2badc4c6132000ab0f491f2b1f773ddc6"} err="failed to get container status \"2447590027e3c3985b0ff486fd0b56c2badc4c6132000ab0f491f2b1f773ddc6\": rpc error: code = NotFound desc = could not find container \"2447590027e3c3985b0ff486fd0b56c2badc4c6132000ab0f491f2b1f773ddc6\": container with ID starting with 2447590027e3c3985b0ff486fd0b56c2badc4c6132000ab0f491f2b1f773ddc6 not found: ID does not exist" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.197014 4751 scope.go:117] "RemoveContainer" containerID="72fa8d6456780e0ea6b18871704b4dde8c21cb82793e3e3a0122013c628913fe" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.210104 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0840d34-f0f3-4bfd-a33c-29cc1e268586-config-data\") pod \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\" (UID: \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\") " Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.210195 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a0840d34-f0f3-4bfd-a33c-29cc1e268586-logs\") pod \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\" (UID: \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\") " Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.210297 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6lkxn\" (UniqueName: \"kubernetes.io/projected/a0840d34-f0f3-4bfd-a33c-29cc1e268586-kube-api-access-6lkxn\") pod \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\" (UID: \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\") " Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.210322 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/a0840d34-f0f3-4bfd-a33c-29cc1e268586-config-data-custom\") pod \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\" (UID: \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\") " Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.210377 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0840d34-f0f3-4bfd-a33c-29cc1e268586-combined-ca-bundle\") pod \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\" (UID: \"a0840d34-f0f3-4bfd-a33c-29cc1e268586\") " Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.211577 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0840d34-f0f3-4bfd-a33c-29cc1e268586-logs" (OuterVolumeSpecName: "logs") pod "a0840d34-f0f3-4bfd-a33c-29cc1e268586" (UID: "a0840d34-f0f3-4bfd-a33c-29cc1e268586"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.215168 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0840d34-f0f3-4bfd-a33c-29cc1e268586-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a0840d34-f0f3-4bfd-a33c-29cc1e268586" (UID: "a0840d34-f0f3-4bfd-a33c-29cc1e268586"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.226111 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.229677 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0840d34-f0f3-4bfd-a33c-29cc1e268586-kube-api-access-6lkxn" (OuterVolumeSpecName: "kube-api-access-6lkxn") pod "a0840d34-f0f3-4bfd-a33c-29cc1e268586" (UID: "a0840d34-f0f3-4bfd-a33c-29cc1e268586"). InnerVolumeSpecName "kube-api-access-6lkxn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.237101 4751 scope.go:117] "RemoveContainer" containerID="42c57c2184b87bf3c9a09b69ad34b6040b0460f9d28d81ed2e8ed7ba354172eb" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.245704 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0840d34-f0f3-4bfd-a33c-29cc1e268586-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a0840d34-f0f3-4bfd-a33c-29cc1e268586" (UID: "a0840d34-f0f3-4bfd-a33c-29cc1e268586"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.262672 4751 scope.go:117] "RemoveContainer" containerID="72fa8d6456780e0ea6b18871704b4dde8c21cb82793e3e3a0122013c628913fe" Feb 27 16:50:53 crc kubenswrapper[4751]: E0227 16:50:53.263161 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72fa8d6456780e0ea6b18871704b4dde8c21cb82793e3e3a0122013c628913fe\": container with ID starting with 72fa8d6456780e0ea6b18871704b4dde8c21cb82793e3e3a0122013c628913fe not found: ID does not exist" containerID="72fa8d6456780e0ea6b18871704b4dde8c21cb82793e3e3a0122013c628913fe" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.263287 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72fa8d6456780e0ea6b18871704b4dde8c21cb82793e3e3a0122013c628913fe"} err="failed to get container status \"72fa8d6456780e0ea6b18871704b4dde8c21cb82793e3e3a0122013c628913fe\": rpc error: code = NotFound desc = could not find container \"72fa8d6456780e0ea6b18871704b4dde8c21cb82793e3e3a0122013c628913fe\": container with ID starting with 72fa8d6456780e0ea6b18871704b4dde8c21cb82793e3e3a0122013c628913fe not found: ID does not exist" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.263376 4751 scope.go:117] "RemoveContainer" containerID="42c57c2184b87bf3c9a09b69ad34b6040b0460f9d28d81ed2e8ed7ba354172eb" Feb 27 16:50:53 crc kubenswrapper[4751]: E0227 16:50:53.263765 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"42c57c2184b87bf3c9a09b69ad34b6040b0460f9d28d81ed2e8ed7ba354172eb\": container with ID starting with 42c57c2184b87bf3c9a09b69ad34b6040b0460f9d28d81ed2e8ed7ba354172eb not found: ID does not exist" containerID="42c57c2184b87bf3c9a09b69ad34b6040b0460f9d28d81ed2e8ed7ba354172eb" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.263892 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"42c57c2184b87bf3c9a09b69ad34b6040b0460f9d28d81ed2e8ed7ba354172eb"} err="failed to get container status \"42c57c2184b87bf3c9a09b69ad34b6040b0460f9d28d81ed2e8ed7ba354172eb\": rpc error: code = NotFound desc = could not find container \"42c57c2184b87bf3c9a09b69ad34b6040b0460f9d28d81ed2e8ed7ba354172eb\": container with ID starting with 42c57c2184b87bf3c9a09b69ad34b6040b0460f9d28d81ed2e8ed7ba354172eb not found: ID does not exist" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.270384 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0840d34-f0f3-4bfd-a33c-29cc1e268586-config-data" (OuterVolumeSpecName: "config-data") pod "a0840d34-f0f3-4bfd-a33c-29cc1e268586" (UID: "a0840d34-f0f3-4bfd-a33c-29cc1e268586"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.311199 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-combined-ca-bundle\") pod \"b4785321-8f3e-44cb-833c-0b78bc368cd9\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.311277 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-config-data\") pod \"b4785321-8f3e-44cb-833c-0b78bc368cd9\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.311363 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4785321-8f3e-44cb-833c-0b78bc368cd9-log-httpd\") pod \"b4785321-8f3e-44cb-833c-0b78bc368cd9\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.311418 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6czv8\" (UniqueName: \"kubernetes.io/projected/b4785321-8f3e-44cb-833c-0b78bc368cd9-kube-api-access-6czv8\") pod \"b4785321-8f3e-44cb-833c-0b78bc368cd9\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.311484 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4785321-8f3e-44cb-833c-0b78bc368cd9-run-httpd\") pod \"b4785321-8f3e-44cb-833c-0b78bc368cd9\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.311532 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-scripts\") pod \"b4785321-8f3e-44cb-833c-0b78bc368cd9\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.311557 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-sg-core-conf-yaml\") pod \"b4785321-8f3e-44cb-833c-0b78bc368cd9\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.311611 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-ceilometer-tls-certs\") pod \"b4785321-8f3e-44cb-833c-0b78bc368cd9\" (UID: \"b4785321-8f3e-44cb-833c-0b78bc368cd9\") " Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.311895 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6lkxn\" (UniqueName: \"kubernetes.io/projected/a0840d34-f0f3-4bfd-a33c-29cc1e268586-kube-api-access-6lkxn\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.311923 4751 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a0840d34-f0f3-4bfd-a33c-29cc1e268586-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.311934 4751 reconciler_common.go:293] "Volume detached for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0840d34-f0f3-4bfd-a33c-29cc1e268586-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.311943 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0840d34-f0f3-4bfd-a33c-29cc1e268586-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.311951 4751 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a0840d34-f0f3-4bfd-a33c-29cc1e268586-logs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.312740 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4785321-8f3e-44cb-833c-0b78bc368cd9-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b4785321-8f3e-44cb-833c-0b78bc368cd9" (UID: "b4785321-8f3e-44cb-833c-0b78bc368cd9"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.313226 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4785321-8f3e-44cb-833c-0b78bc368cd9-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b4785321-8f3e-44cb-833c-0b78bc368cd9" (UID: "b4785321-8f3e-44cb-833c-0b78bc368cd9"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.315210 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-scripts" (OuterVolumeSpecName: "scripts") pod "b4785321-8f3e-44cb-833c-0b78bc368cd9" (UID: "b4785321-8f3e-44cb-833c-0b78bc368cd9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.316836 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4785321-8f3e-44cb-833c-0b78bc368cd9-kube-api-access-6czv8" (OuterVolumeSpecName: "kube-api-access-6czv8") pod "b4785321-8f3e-44cb-833c-0b78bc368cd9" (UID: "b4785321-8f3e-44cb-833c-0b78bc368cd9"). InnerVolumeSpecName "kube-api-access-6czv8". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.329835 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b4785321-8f3e-44cb-833c-0b78bc368cd9" (UID: "b4785321-8f3e-44cb-833c-0b78bc368cd9"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.356203 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "b4785321-8f3e-44cb-833c-0b78bc368cd9" (UID: "b4785321-8f3e-44cb-833c-0b78bc368cd9"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.399092 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-config-data" (OuterVolumeSpecName: "config-data") pod "b4785321-8f3e-44cb-833c-0b78bc368cd9" (UID: "b4785321-8f3e-44cb-833c-0b78bc368cd9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.399773 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b4785321-8f3e-44cb-833c-0b78bc368cd9" (UID: "b4785321-8f3e-44cb-833c-0b78bc368cd9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.413653 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.413674 4751 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4785321-8f3e-44cb-833c-0b78bc368cd9-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.413684 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6czv8\" (UniqueName: \"kubernetes.io/projected/b4785321-8f3e-44cb-833c-0b78bc368cd9-kube-api-access-6czv8\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.413695 4751 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4785321-8f3e-44cb-833c-0b78bc368cd9-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.413705 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.413713 4751 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.413721 4751 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.413729 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4785321-8f3e-44cb-833c-0b78bc368cd9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.423021 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.515027 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cjzj5\" (UniqueName: \"kubernetes.io/projected/23d98e0b-8d21-4ad9-b3a4-716c1d221949-kube-api-access-cjzj5\") pod \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.515095 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-combined-ca-bundle\") pod \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.515131 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/23d98e0b-8d21-4ad9-b3a4-716c1d221949-etc-machine-id\") pod \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.515166 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-config-data-custom\") pod \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.515236 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-scripts\") pod \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.515251 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-config-data\") pod \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\" (UID: \"23d98e0b-8d21-4ad9-b3a4-716c1d221949\") " Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.515795 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/23d98e0b-8d21-4ad9-b3a4-716c1d221949-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "23d98e0b-8d21-4ad9-b3a4-716c1d221949" (UID: "23d98e0b-8d21-4ad9-b3a4-716c1d221949"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.521552 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-scripts" (OuterVolumeSpecName: "scripts") pod "23d98e0b-8d21-4ad9-b3a4-716c1d221949" (UID: "23d98e0b-8d21-4ad9-b3a4-716c1d221949"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.521573 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23d98e0b-8d21-4ad9-b3a4-716c1d221949-kube-api-access-cjzj5" (OuterVolumeSpecName: "kube-api-access-cjzj5") pod "23d98e0b-8d21-4ad9-b3a4-716c1d221949" (UID: "23d98e0b-8d21-4ad9-b3a4-716c1d221949"). InnerVolumeSpecName "kube-api-access-cjzj5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.521570 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "23d98e0b-8d21-4ad9-b3a4-716c1d221949" (UID: "23d98e0b-8d21-4ad9-b3a4-716c1d221949"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.556265 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "23d98e0b-8d21-4ad9-b3a4-716c1d221949" (UID: "23d98e0b-8d21-4ad9-b3a4-716c1d221949"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.600569 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-config-data" (OuterVolumeSpecName: "config-data") pod "23d98e0b-8d21-4ad9-b3a4-716c1d221949" (UID: "23d98e0b-8d21-4ad9-b3a4-716c1d221949"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.617313 4751 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/23d98e0b-8d21-4ad9-b3a4-716c1d221949-etc-machine-id\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.617345 4751 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.617354 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.617363 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.617372 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cjzj5\" (UniqueName: \"kubernetes.io/projected/23d98e0b-8d21-4ad9-b3a4-716c1d221949-kube-api-access-cjzj5\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.617383 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23d98e0b-8d21-4ad9-b3a4-716c1d221949-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.629880 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.640677 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-d9bcd5f6c-zlj75"] Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.646152 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-d9bcd5f6c-zlj75"] Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.718363 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f501d880-21be-44e3-b015-05b79e226279-config-data\") pod \"f501d880-21be-44e3-b015-05b79e226279\" (UID: \"f501d880-21be-44e3-b015-05b79e226279\") " Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.718533 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f501d880-21be-44e3-b015-05b79e226279-combined-ca-bundle\") pod \"f501d880-21be-44e3-b015-05b79e226279\" (UID: \"f501d880-21be-44e3-b015-05b79e226279\") " Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.718570 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4wwb\" (UniqueName: \"kubernetes.io/projected/f501d880-21be-44e3-b015-05b79e226279-kube-api-access-x4wwb\") pod \"f501d880-21be-44e3-b015-05b79e226279\" (UID: \"f501d880-21be-44e3-b015-05b79e226279\") " Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.722695 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f501d880-21be-44e3-b015-05b79e226279-kube-api-access-x4wwb" (OuterVolumeSpecName: "kube-api-access-x4wwb") pod "f501d880-21be-44e3-b015-05b79e226279" (UID: "f501d880-21be-44e3-b015-05b79e226279"). InnerVolumeSpecName "kube-api-access-x4wwb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.735934 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f501d880-21be-44e3-b015-05b79e226279-config-data" (OuterVolumeSpecName: "config-data") pod "f501d880-21be-44e3-b015-05b79e226279" (UID: "f501d880-21be-44e3-b015-05b79e226279"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.739556 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f501d880-21be-44e3-b015-05b79e226279-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f501d880-21be-44e3-b015-05b79e226279" (UID: "f501d880-21be-44e3-b015-05b79e226279"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.820526 4751 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f501d880-21be-44e3-b015-05b79e226279-config-data\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.820571 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f501d880-21be-44e3-b015-05b79e226279-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:53 crc kubenswrapper[4751]: I0227 16:50:53.820590 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4wwb\" (UniqueName: \"kubernetes.io/projected/f501d880-21be-44e3-b015-05b79e226279-kube-api-access-x4wwb\") on node \"crc\" DevicePath \"\"" Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.157970 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.158633 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4785321-8f3e-44cb-833c-0b78bc368cd9","Type":"ContainerDied","Data":"e99657895ac4cea2b6cfd70755ad4bf2ea4e3ad10330c06620ad2d2ed0bab24c"} Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.158695 4751 scope.go:117] "RemoveContainer" containerID="c4a7175c059cf3518ae6eba6d361fbebbc8c52020d2692d9f04fb59309e9cac4" Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.166560 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"f501d880-21be-44e3-b015-05b79e226279","Type":"ContainerDied","Data":"6854150bcfe69bab52c313f36b167edc2ea7852a5af9744ef353f572a9de7103"} Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.166687 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.169943 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"23d98e0b-8d21-4ad9-b3a4-716c1d221949","Type":"ContainerDied","Data":"86da512d71e64953a8fbea39e3f4571119737e50e0bfe9c059be0c0424c5d93c"} Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.169952 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.197760 4751 scope.go:117] "RemoveContainer" containerID="62ed64131f674628306788d24cfc85250f4581f979fadacc28ddf528b64bebfd" Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.218634 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.231294 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.246982 4751 scope.go:117] "RemoveContainer" containerID="a9ee4f7f4be2929eae47c5cf12c06d5e1590f223ebab7558c7fbef22ddb4ca6f" Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.256660 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.267905 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.275993 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.279315 4751 scope.go:117] "RemoveContainer" containerID="5ea526479c46d824bbf94a208fd6d3670757ee20a011265cfd59b145eb86cf1e" Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.283579 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.300388 4751 scope.go:117] "RemoveContainer" containerID="da16950bef4c8761589f066404058c19ec5943530e74d8711c9cc18e707be9f5" Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.321424 4751 scope.go:117] "RemoveContainer" containerID="2ecbec27a7197208f58327c1b614eb58bf364a81b50228cd0b0b7068505b1049" Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.341984 4751 scope.go:117] "RemoveContainer" containerID="975963e810405e7a1f164ed08541517bb44532e23c9b968a1511ad894a22d948" Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.541263 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23d98e0b-8d21-4ad9-b3a4-716c1d221949" path="/var/lib/kubelet/pods/23d98e0b-8d21-4ad9-b3a4-716c1d221949/volumes" Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.543637 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d8d9ed0-8606-47cb-a164-7e6bbac390cd" path="/var/lib/kubelet/pods/2d8d9ed0-8606-47cb-a164-7e6bbac390cd/volumes" Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.544602 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f29e0f7-8556-4570-a115-1d1ee089479c" path="/var/lib/kubelet/pods/3f29e0f7-8556-4570-a115-1d1ee089479c/volumes" Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.546263 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0840d34-f0f3-4bfd-a33c-29cc1e268586" path="/var/lib/kubelet/pods/a0840d34-f0f3-4bfd-a33c-29cc1e268586/volumes" Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.547220 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4785321-8f3e-44cb-833c-0b78bc368cd9" path="/var/lib/kubelet/pods/b4785321-8f3e-44cb-833c-0b78bc368cd9/volumes" Feb 27 16:50:54 crc kubenswrapper[4751]: I0227 16:50:54.548493 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f501d880-21be-44e3-b015-05b79e226279" path="/var/lib/kubelet/pods/f501d880-21be-44e3-b015-05b79e226279/volumes" 
Feb 27 16:50:57 crc kubenswrapper[4751]: E0227 16:50:57.133350 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" containerID="f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Feb 27 16:50:57 crc kubenswrapper[4751]: E0227 16:50:57.134338 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Feb 27 16:50:57 crc kubenswrapper[4751]: E0227 16:50:57.134749 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" containerID="f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Feb 27 16:50:57 crc kubenswrapper[4751]: E0227 16:50:57.135043 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" containerID="f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Feb 27 16:50:57 crc kubenswrapper[4751]: E0227 16:50:57.135064 4751 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-frvvc" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerName="ovsdb-server" Feb 27 16:50:57 crc kubenswrapper[4751]: E0227 16:50:57.135917 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Feb 27 16:50:57 crc kubenswrapper[4751]: E0227 16:50:57.137095 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Feb 27 16:50:57 crc kubenswrapper[4751]: E0227 16:50:57.137122 4751 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-frvvc" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerName="ovs-vswitchd" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.038480 4751 kubelet.go:2421] "SyncLoop 
ADD" source="api" pods=["openshift-marketplace/certified-operators-z6tzc"] Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039095 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7555b92d-c801-4da2-8d2e-78fa39c892d2" containerName="nova-scheduler-scheduler" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039117 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="7555b92d-c801-4da2-8d2e-78fa39c892d2" containerName="nova-scheduler-scheduler" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039143 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68bcf8b3-a271-47f0-9815-17cd3fdaec3e" containerName="nova-metadata-metadata" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039156 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="68bcf8b3-a271-47f0-9815-17cd3fdaec3e" containerName="nova-metadata-metadata" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039174 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef932397-22e9-4d46-90e3-57076299d4cf" containerName="glance-httpd" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039187 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef932397-22e9-4d46-90e3-57076299d4cf" containerName="glance-httpd" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039215 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d8d9ed0-8606-47cb-a164-7e6bbac390cd" containerName="nova-cell1-conductor-conductor" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039228 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d8d9ed0-8606-47cb-a164-7e6bbac390cd" containerName="nova-cell1-conductor-conductor" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039254 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36495e7a-b8f8-4d54-a504-e92bb6211327" containerName="openstack-network-exporter" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039267 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="36495e7a-b8f8-4d54-a504-e92bb6211327" containerName="openstack-network-exporter" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039285 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f29e0f7-8556-4570-a115-1d1ee089479c" containerName="ovn-controller" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039299 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f29e0f7-8556-4570-a115-1d1ee089479c" containerName="ovn-controller" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039316 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27f559b3-2c7d-4567-b836-702db66d74ae" containerName="cinder-api" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039327 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="27f559b3-2c7d-4567-b836-702db66d74ae" containerName="cinder-api" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039351 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0840d34-f0f3-4bfd-a33c-29cc1e268586" containerName="barbican-worker" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039363 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0840d34-f0f3-4bfd-a33c-29cc1e268586" containerName="barbican-worker" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039381 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f501d880-21be-44e3-b015-05b79e226279" containerName="nova-cell0-conductor-conductor" Feb 27 16:50:58 
crc kubenswrapper[4751]: I0227 16:50:58.039393 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="f501d880-21be-44e3-b015-05b79e226279" containerName="nova-cell0-conductor-conductor" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039436 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cecf602c-dec2-40c6-922c-bf84b707b1b9" containerName="rabbitmq" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039448 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="cecf602c-dec2-40c6-922c-bf84b707b1b9" containerName="rabbitmq" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039472 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68bcf8b3-a271-47f0-9815-17cd3fdaec3e" containerName="nova-metadata-log" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039484 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="68bcf8b3-a271-47f0-9815-17cd3fdaec3e" containerName="nova-metadata-log" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039500 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36495e7a-b8f8-4d54-a504-e92bb6211327" containerName="ovn-northd" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039511 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="36495e7a-b8f8-4d54-a504-e92bb6211327" containerName="ovn-northd" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039528 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ad24b50-556b-4799-a598-b7618c1664fd" containerName="glance-httpd" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039540 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ad24b50-556b-4799-a598-b7618c1664fd" containerName="glance-httpd" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039555 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef932397-22e9-4d46-90e3-57076299d4cf" containerName="glance-log" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039567 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef932397-22e9-4d46-90e3-57076299d4cf" containerName="glance-log" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039606 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="253a763c-21da-4224-91a2-e3bdc6eca0e9" containerName="mysql-bootstrap" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039619 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="253a763c-21da-4224-91a2-e3bdc6eca0e9" containerName="mysql-bootstrap" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039637 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dc9beed-8444-4389-8859-234af0090157" containerName="mariadb-account-create-update" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039649 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dc9beed-8444-4389-8859-234af0090157" containerName="mariadb-account-create-update" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039666 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4785321-8f3e-44cb-833c-0b78bc368cd9" containerName="sg-core" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039678 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4785321-8f3e-44cb-833c-0b78bc368cd9" containerName="sg-core" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039690 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51a81c6a-6814-412d-b77d-e741f1f74446" containerName="rabbitmq" Feb 27 
16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039702 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="51a81c6a-6814-412d-b77d-e741f1f74446" containerName="rabbitmq" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039717 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa" containerName="kube-state-metrics" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039730 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa" containerName="kube-state-metrics" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039749 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4785321-8f3e-44cb-833c-0b78bc368cd9" containerName="ceilometer-central-agent" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039760 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4785321-8f3e-44cb-833c-0b78bc368cd9" containerName="ceilometer-central-agent" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039777 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51a81c6a-6814-412d-b77d-e741f1f74446" containerName="setup-container" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039789 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="51a81c6a-6814-412d-b77d-e741f1f74446" containerName="setup-container" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039803 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef465c53-5add-41ff-9fcc-00e714bc2bc0" containerName="proxy-server" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039815 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef465c53-5add-41ff-9fcc-00e714bc2bc0" containerName="proxy-server" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039830 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9ca6eb2-820e-49ea-80ca-bd0e352d4243" containerName="barbican-api-log" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039842 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9ca6eb2-820e-49ea-80ca-bd0e352d4243" containerName="barbican-api-log" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039855 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef465c53-5add-41ff-9fcc-00e714bc2bc0" containerName="proxy-httpd" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039866 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef465c53-5add-41ff-9fcc-00e714bc2bc0" containerName="proxy-httpd" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039880 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ad24b50-556b-4799-a598-b7618c1664fd" containerName="glance-log" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.039892 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ad24b50-556b-4799-a598-b7618c1664fd" containerName="glance-log" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.039911 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cecf602c-dec2-40c6-922c-bf84b707b1b9" containerName="setup-container" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.040069 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="cecf602c-dec2-40c6-922c-bf84b707b1b9" containerName="setup-container" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.040086 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23d98e0b-8d21-4ad9-b3a4-716c1d221949" containerName="probe" Feb 
27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.040097 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="23d98e0b-8d21-4ad9-b3a4-716c1d221949" containerName="probe" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.040112 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a8f14c4-f8bc-4247-b2a2-72aa4801adfa" containerName="nova-api-api" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.040124 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a8f14c4-f8bc-4247-b2a2-72aa4801adfa" containerName="nova-api-api" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.040141 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c5e58eb-31a4-4253-8cb9-a9486bb2d955" containerName="placement-log" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.040152 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c5e58eb-31a4-4253-8cb9-a9486bb2d955" containerName="placement-log" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.040171 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0840d34-f0f3-4bfd-a33c-29cc1e268586" containerName="barbican-worker-log" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.040182 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0840d34-f0f3-4bfd-a33c-29cc1e268586" containerName="barbican-worker-log" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.040201 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cf1e239-243c-4f96-abb6-c3fb850e98e1" containerName="keystone-api" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.040212 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cf1e239-243c-4f96-abb6-c3fb850e98e1" containerName="keystone-api" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.040229 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23d98e0b-8d21-4ad9-b3a4-716c1d221949" containerName="cinder-scheduler" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.040241 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="23d98e0b-8d21-4ad9-b3a4-716c1d221949" containerName="cinder-scheduler" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.040262 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a8f14c4-f8bc-4247-b2a2-72aa4801adfa" containerName="nova-api-log" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.040273 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a8f14c4-f8bc-4247-b2a2-72aa4801adfa" containerName="nova-api-log" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.040294 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4785321-8f3e-44cb-833c-0b78bc368cd9" containerName="proxy-httpd" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.040305 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4785321-8f3e-44cb-833c-0b78bc368cd9" containerName="proxy-httpd" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.040319 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="253a763c-21da-4224-91a2-e3bdc6eca0e9" containerName="galera" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.040330 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="253a763c-21da-4224-91a2-e3bdc6eca0e9" containerName="galera" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.040349 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27f559b3-2c7d-4567-b836-702db66d74ae" containerName="cinder-api-log" Feb 27 16:50:58 crc 
kubenswrapper[4751]: I0227 16:50:58.040362 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="27f559b3-2c7d-4567-b836-702db66d74ae" containerName="cinder-api-log" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.040378 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c3834ac-6796-485b-9dec-e45cebf976df" containerName="memcached" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.040390 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c3834ac-6796-485b-9dec-e45cebf976df" containerName="memcached" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.040478 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4785321-8f3e-44cb-833c-0b78bc368cd9" containerName="ceilometer-notification-agent" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.040491 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4785321-8f3e-44cb-833c-0b78bc368cd9" containerName="ceilometer-notification-agent" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.040512 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dc9beed-8444-4389-8859-234af0090157" containerName="mariadb-account-create-update" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.040524 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dc9beed-8444-4389-8859-234af0090157" containerName="mariadb-account-create-update" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.040541 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9ca6eb2-820e-49ea-80ca-bd0e352d4243" containerName="barbican-api" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.040553 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9ca6eb2-820e-49ea-80ca-bd0e352d4243" containerName="barbican-api" Feb 27 16:50:58 crc kubenswrapper[4751]: E0227 16:50:58.040574 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c5e58eb-31a4-4253-8cb9-a9486bb2d955" containerName="placement-api" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.040586 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c5e58eb-31a4-4253-8cb9-a9486bb2d955" containerName="placement-api" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.040871 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef932397-22e9-4d46-90e3-57076299d4cf" containerName="glance-httpd" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.040901 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0840d34-f0f3-4bfd-a33c-29cc1e268586" containerName="barbican-worker" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.040920 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="27f559b3-2c7d-4567-b836-702db66d74ae" containerName="cinder-api-log" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.040937 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ad24b50-556b-4799-a598-b7618c1664fd" containerName="glance-httpd" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.040957 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="7555b92d-c801-4da2-8d2e-78fa39c892d2" containerName="nova-scheduler-scheduler" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.040971 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9ca6eb2-820e-49ea-80ca-bd0e352d4243" containerName="barbican-api" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.040987 4751 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="f9ca6eb2-820e-49ea-80ca-bd0e352d4243" containerName="barbican-api-log" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041001 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4785321-8f3e-44cb-833c-0b78bc368cd9" containerName="proxy-httpd" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041013 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4785321-8f3e-44cb-833c-0b78bc368cd9" containerName="ceilometer-central-agent" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041032 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="36495e7a-b8f8-4d54-a504-e92bb6211327" containerName="ovn-northd" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041049 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="23d98e0b-8d21-4ad9-b3a4-716c1d221949" containerName="cinder-scheduler" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041068 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="23d98e0b-8d21-4ad9-b3a4-716c1d221949" containerName="probe" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041088 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ad24b50-556b-4799-a598-b7618c1664fd" containerName="glance-log" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041104 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef465c53-5add-41ff-9fcc-00e714bc2bc0" containerName="proxy-server" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041122 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dc9beed-8444-4389-8859-234af0090157" containerName="mariadb-account-create-update" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041137 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="f501d880-21be-44e3-b015-05b79e226279" containerName="nova-cell0-conductor-conductor" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041161 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef465c53-5add-41ff-9fcc-00e714bc2bc0" containerName="proxy-httpd" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041183 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="54d6b0d5-0c96-4fc0-94cb-3b68bafc6daa" containerName="kube-state-metrics" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041201 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="36495e7a-b8f8-4d54-a504-e92bb6211327" containerName="openstack-network-exporter" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041215 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4785321-8f3e-44cb-833c-0b78bc368cd9" containerName="ceilometer-notification-agent" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041235 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f29e0f7-8556-4570-a115-1d1ee089479c" containerName="ovn-controller" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041252 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0840d34-f0f3-4bfd-a33c-29cc1e268586" containerName="barbican-worker-log" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041270 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c5e58eb-31a4-4253-8cb9-a9486bb2d955" containerName="placement-api" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041288 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="68bcf8b3-a271-47f0-9815-17cd3fdaec3e" 
containerName="nova-metadata-log" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041302 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4785321-8f3e-44cb-833c-0b78bc368cd9" containerName="sg-core" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041317 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="27f559b3-2c7d-4567-b836-702db66d74ae" containerName="cinder-api" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041337 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d8d9ed0-8606-47cb-a164-7e6bbac390cd" containerName="nova-cell1-conductor-conductor" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041357 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c3834ac-6796-485b-9dec-e45cebf976df" containerName="memcached" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041374 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c5e58eb-31a4-4253-8cb9-a9486bb2d955" containerName="placement-log" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041394 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a8f14c4-f8bc-4247-b2a2-72aa4801adfa" containerName="nova-api-api" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041437 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dc9beed-8444-4389-8859-234af0090157" containerName="mariadb-account-create-update" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041454 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a8f14c4-f8bc-4247-b2a2-72aa4801adfa" containerName="nova-api-log" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041466 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="253a763c-21da-4224-91a2-e3bdc6eca0e9" containerName="galera" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041483 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="51a81c6a-6814-412d-b77d-e741f1f74446" containerName="rabbitmq" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041496 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef932397-22e9-4d46-90e3-57076299d4cf" containerName="glance-log" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041510 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="cecf602c-dec2-40c6-922c-bf84b707b1b9" containerName="rabbitmq" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041524 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="68bcf8b3-a271-47f0-9815-17cd3fdaec3e" containerName="nova-metadata-metadata" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.041542 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="0cf1e239-243c-4f96-abb6-c3fb850e98e1" containerName="keystone-api" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.043246 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-z6tzc" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.063966 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z6tzc"] Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.096749 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28bec30a-be40-4747-8378-a04768380bf9-utilities\") pod \"certified-operators-z6tzc\" (UID: \"28bec30a-be40-4747-8378-a04768380bf9\") " pod="openshift-marketplace/certified-operators-z6tzc" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.096796 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28bec30a-be40-4747-8378-a04768380bf9-catalog-content\") pod \"certified-operators-z6tzc\" (UID: \"28bec30a-be40-4747-8378-a04768380bf9\") " pod="openshift-marketplace/certified-operators-z6tzc" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.096836 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jdd4\" (UniqueName: \"kubernetes.io/projected/28bec30a-be40-4747-8378-a04768380bf9-kube-api-access-2jdd4\") pod \"certified-operators-z6tzc\" (UID: \"28bec30a-be40-4747-8378-a04768380bf9\") " pod="openshift-marketplace/certified-operators-z6tzc" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.198377 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28bec30a-be40-4747-8378-a04768380bf9-utilities\") pod \"certified-operators-z6tzc\" (UID: \"28bec30a-be40-4747-8378-a04768380bf9\") " pod="openshift-marketplace/certified-operators-z6tzc" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.198436 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28bec30a-be40-4747-8378-a04768380bf9-catalog-content\") pod \"certified-operators-z6tzc\" (UID: \"28bec30a-be40-4747-8378-a04768380bf9\") " pod="openshift-marketplace/certified-operators-z6tzc" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.198471 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jdd4\" (UniqueName: \"kubernetes.io/projected/28bec30a-be40-4747-8378-a04768380bf9-kube-api-access-2jdd4\") pod \"certified-operators-z6tzc\" (UID: \"28bec30a-be40-4747-8378-a04768380bf9\") " pod="openshift-marketplace/certified-operators-z6tzc" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.199174 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28bec30a-be40-4747-8378-a04768380bf9-utilities\") pod \"certified-operators-z6tzc\" (UID: \"28bec30a-be40-4747-8378-a04768380bf9\") " pod="openshift-marketplace/certified-operators-z6tzc" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.199203 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28bec30a-be40-4747-8378-a04768380bf9-catalog-content\") pod \"certified-operators-z6tzc\" (UID: \"28bec30a-be40-4747-8378-a04768380bf9\") " pod="openshift-marketplace/certified-operators-z6tzc" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.216972 4751 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-2jdd4\" (UniqueName: \"kubernetes.io/projected/28bec30a-be40-4747-8378-a04768380bf9-kube-api-access-2jdd4\") pod \"certified-operators-z6tzc\" (UID: \"28bec30a-be40-4747-8378-a04768380bf9\") " pod="openshift-marketplace/certified-operators-z6tzc" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.399580 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z6tzc" Feb 27 16:50:58 crc kubenswrapper[4751]: I0227 16:50:58.857217 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z6tzc"] Feb 27 16:50:59 crc kubenswrapper[4751]: I0227 16:50:59.234828 4751 generic.go:334] "Generic (PLEG): container finished" podID="28bec30a-be40-4747-8378-a04768380bf9" containerID="dda2b6b850ddb3c2aa1110d01637f61e822de307737721781602b51298aceb9a" exitCode=0 Feb 27 16:50:59 crc kubenswrapper[4751]: I0227 16:50:59.234889 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z6tzc" event={"ID":"28bec30a-be40-4747-8378-a04768380bf9","Type":"ContainerDied","Data":"dda2b6b850ddb3c2aa1110d01637f61e822de307737721781602b51298aceb9a"} Feb 27 16:50:59 crc kubenswrapper[4751]: I0227 16:50:59.235148 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z6tzc" event={"ID":"28bec30a-be40-4747-8378-a04768380bf9","Type":"ContainerStarted","Data":"2b861d300807bb728ec6523cd94c39a5a32a2bca34e14ddea98b4a3a0423bb46"} Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.244488 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.260824 4751 generic.go:334] "Generic (PLEG): container finished" podID="28bec30a-be40-4747-8378-a04768380bf9" containerID="21d5c04f44494e86bcdee37882aff9059b708ab70029b24cff1949b69046c337" exitCode=0 Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.260956 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z6tzc" event={"ID":"28bec30a-be40-4747-8378-a04768380bf9","Type":"ContainerDied","Data":"21d5c04f44494e86bcdee37882aff9059b708ab70029b24cff1949b69046c337"} Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.264470 4751 generic.go:334] "Generic (PLEG): container finished" podID="16754588-ca23-484b-b8e8-21bc94c640f3" containerID="a608ca0fcc607ebcc4925dc217e870cd065fd09df04f7caf9fd6c4671876c01c" exitCode=0 Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.264519 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-649c97d5df-x4tkf" event={"ID":"16754588-ca23-484b-b8e8-21bc94c640f3","Type":"ContainerDied","Data":"a608ca0fcc607ebcc4925dc217e870cd065fd09df04f7caf9fd6c4671876c01c"} Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.264552 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-649c97d5df-x4tkf" event={"ID":"16754588-ca23-484b-b8e8-21bc94c640f3","Type":"ContainerDied","Data":"5bc57d33e7d1c40aacf0c0bd89f7bf6fb922749fa26c3e8499dff4cafda1351b"} Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.264572 4751 scope.go:117] "RemoveContainer" containerID="8cae1a6a519f4d8cb3bd285b3e459c6d60a2234a0f23af55f5a0cf07199403d7" Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.264567 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-649c97d5df-x4tkf" Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.305667 4751 scope.go:117] "RemoveContainer" containerID="a608ca0fcc607ebcc4925dc217e870cd065fd09df04f7caf9fd6c4671876c01c" Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.337608 4751 scope.go:117] "RemoveContainer" containerID="8cae1a6a519f4d8cb3bd285b3e459c6d60a2234a0f23af55f5a0cf07199403d7" Feb 27 16:51:01 crc kubenswrapper[4751]: E0227 16:51:01.338132 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8cae1a6a519f4d8cb3bd285b3e459c6d60a2234a0f23af55f5a0cf07199403d7\": container with ID starting with 8cae1a6a519f4d8cb3bd285b3e459c6d60a2234a0f23af55f5a0cf07199403d7 not found: ID does not exist" containerID="8cae1a6a519f4d8cb3bd285b3e459c6d60a2234a0f23af55f5a0cf07199403d7" Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.338160 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8cae1a6a519f4d8cb3bd285b3e459c6d60a2234a0f23af55f5a0cf07199403d7"} err="failed to get container status \"8cae1a6a519f4d8cb3bd285b3e459c6d60a2234a0f23af55f5a0cf07199403d7\": rpc error: code = NotFound desc = could not find container \"8cae1a6a519f4d8cb3bd285b3e459c6d60a2234a0f23af55f5a0cf07199403d7\": container with ID starting with 8cae1a6a519f4d8cb3bd285b3e459c6d60a2234a0f23af55f5a0cf07199403d7 not found: ID does not exist" Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.338179 4751 scope.go:117] "RemoveContainer" containerID="a608ca0fcc607ebcc4925dc217e870cd065fd09df04f7caf9fd6c4671876c01c" Feb 27 16:51:01 crc kubenswrapper[4751]: E0227 16:51:01.338552 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a608ca0fcc607ebcc4925dc217e870cd065fd09df04f7caf9fd6c4671876c01c\": container with ID starting with a608ca0fcc607ebcc4925dc217e870cd065fd09df04f7caf9fd6c4671876c01c not found: ID does not exist" containerID="a608ca0fcc607ebcc4925dc217e870cd065fd09df04f7caf9fd6c4671876c01c" Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.338640 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a608ca0fcc607ebcc4925dc217e870cd065fd09df04f7caf9fd6c4671876c01c"} err="failed to get container status \"a608ca0fcc607ebcc4925dc217e870cd065fd09df04f7caf9fd6c4671876c01c\": rpc error: code = NotFound desc = could not find container \"a608ca0fcc607ebcc4925dc217e870cd065fd09df04f7caf9fd6c4671876c01c\": container with ID starting with a608ca0fcc607ebcc4925dc217e870cd065fd09df04f7caf9fd6c4671876c01c not found: ID does not exist" Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.345118 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-internal-tls-certs\") pod \"16754588-ca23-484b-b8e8-21bc94c640f3\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.345216 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hs79p\" (UniqueName: \"kubernetes.io/projected/16754588-ca23-484b-b8e8-21bc94c640f3-kube-api-access-hs79p\") pod \"16754588-ca23-484b-b8e8-21bc94c640f3\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.345245 4751 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-httpd-config\") pod \"16754588-ca23-484b-b8e8-21bc94c640f3\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.345284 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-public-tls-certs\") pod \"16754588-ca23-484b-b8e8-21bc94c640f3\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.345318 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-combined-ca-bundle\") pod \"16754588-ca23-484b-b8e8-21bc94c640f3\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.345454 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-config\") pod \"16754588-ca23-484b-b8e8-21bc94c640f3\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.345510 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-ovndb-tls-certs\") pod \"16754588-ca23-484b-b8e8-21bc94c640f3\" (UID: \"16754588-ca23-484b-b8e8-21bc94c640f3\") " Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.351589 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "16754588-ca23-484b-b8e8-21bc94c640f3" (UID: "16754588-ca23-484b-b8e8-21bc94c640f3"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.351769 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16754588-ca23-484b-b8e8-21bc94c640f3-kube-api-access-hs79p" (OuterVolumeSpecName: "kube-api-access-hs79p") pod "16754588-ca23-484b-b8e8-21bc94c640f3" (UID: "16754588-ca23-484b-b8e8-21bc94c640f3"). InnerVolumeSpecName "kube-api-access-hs79p". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.393536 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "16754588-ca23-484b-b8e8-21bc94c640f3" (UID: "16754588-ca23-484b-b8e8-21bc94c640f3"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.399619 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-config" (OuterVolumeSpecName: "config") pod "16754588-ca23-484b-b8e8-21bc94c640f3" (UID: "16754588-ca23-484b-b8e8-21bc94c640f3"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.401093 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "16754588-ca23-484b-b8e8-21bc94c640f3" (UID: "16754588-ca23-484b-b8e8-21bc94c640f3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.404777 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "16754588-ca23-484b-b8e8-21bc94c640f3" (UID: "16754588-ca23-484b-b8e8-21bc94c640f3"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.434169 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "16754588-ca23-484b-b8e8-21bc94c640f3" (UID: "16754588-ca23-484b-b8e8-21bc94c640f3"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.447699 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.447748 4751 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.447763 4751 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.447777 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hs79p\" (UniqueName: \"kubernetes.io/projected/16754588-ca23-484b-b8e8-21bc94c640f3-kube-api-access-hs79p\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.447790 4751 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-httpd-config\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.447801 4751 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.447813 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16754588-ca23-484b-b8e8-21bc94c640f3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.610237 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-649c97d5df-x4tkf"] Feb 27 16:51:01 crc kubenswrapper[4751]: I0227 16:51:01.615220 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/neutron-649c97d5df-x4tkf"] Feb 27 16:51:02 crc kubenswrapper[4751]: E0227 16:51:02.132914 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" containerID="f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Feb 27 16:51:02 crc kubenswrapper[4751]: E0227 16:51:02.133542 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" containerID="f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Feb 27 16:51:02 crc kubenswrapper[4751]: E0227 16:51:02.134166 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" containerID="f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Feb 27 16:51:02 crc kubenswrapper[4751]: E0227 16:51:02.134251 4751 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-frvvc" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerName="ovsdb-server" Feb 27 16:51:02 crc kubenswrapper[4751]: E0227 16:51:02.134905 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Feb 27 16:51:02 crc kubenswrapper[4751]: E0227 16:51:02.136737 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Feb 27 16:51:02 crc kubenswrapper[4751]: E0227 16:51:02.138952 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Feb 27 16:51:02 crc kubenswrapper[4751]: E0227 16:51:02.139002 4751 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-frvvc" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerName="ovs-vswitchd" Feb 27 16:51:02 crc kubenswrapper[4751]: I0227 
16:51:02.279370 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z6tzc" event={"ID":"28bec30a-be40-4747-8378-a04768380bf9","Type":"ContainerStarted","Data":"46c56bbedb55639710216735b4c3f234429c41561badee1bf1bae86f9116c973"} Feb 27 16:51:02 crc kubenswrapper[4751]: I0227 16:51:02.300998 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-z6tzc" podStartSLOduration=1.856396003 podStartE2EDuration="4.300977844s" podCreationTimestamp="2026-02-27 16:50:58 +0000 UTC" firstStartedPulling="2026-02-27 16:50:59.239288697 +0000 UTC m=+1621.386303144" lastFinishedPulling="2026-02-27 16:51:01.683870498 +0000 UTC m=+1623.830884985" observedRunningTime="2026-02-27 16:51:02.298541659 +0000 UTC m=+1624.445556116" watchObservedRunningTime="2026-02-27 16:51:02.300977844 +0000 UTC m=+1624.447992301" Feb 27 16:51:02 crc kubenswrapper[4751]: I0227 16:51:02.531574 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16754588-ca23-484b-b8e8-21bc94c640f3" path="/var/lib/kubelet/pods/16754588-ca23-484b-b8e8-21bc94c640f3/volumes" Feb 27 16:51:07 crc kubenswrapper[4751]: E0227 16:51:07.132807 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" containerID="f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Feb 27 16:51:07 crc kubenswrapper[4751]: E0227 16:51:07.134091 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" containerID="f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Feb 27 16:51:07 crc kubenswrapper[4751]: E0227 16:51:07.134948 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Feb 27 16:51:07 crc kubenswrapper[4751]: E0227 16:51:07.135269 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" containerID="f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Feb 27 16:51:07 crc kubenswrapper[4751]: E0227 16:51:07.135329 4751 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-frvvc" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerName="ovsdb-server" Feb 27 16:51:07 crc kubenswrapper[4751]: E0227 16:51:07.137344 4751 log.go:32] "ExecSync cmd from runtime service 
failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Feb 27 16:51:07 crc kubenswrapper[4751]: E0227 16:51:07.140110 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Feb 27 16:51:07 crc kubenswrapper[4751]: E0227 16:51:07.140178 4751 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-frvvc" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerName="ovs-vswitchd" Feb 27 16:51:08 crc kubenswrapper[4751]: I0227 16:51:08.400544 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-z6tzc" Feb 27 16:51:08 crc kubenswrapper[4751]: I0227 16:51:08.400969 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-z6tzc" Feb 27 16:51:08 crc kubenswrapper[4751]: I0227 16:51:08.462590 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-z6tzc" Feb 27 16:51:09 crc kubenswrapper[4751]: I0227 16:51:09.435793 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-z6tzc" Feb 27 16:51:09 crc kubenswrapper[4751]: I0227 16:51:09.491537 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z6tzc"] Feb 27 16:51:11 crc kubenswrapper[4751]: I0227 16:51:11.388013 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-z6tzc" podUID="28bec30a-be40-4747-8378-a04768380bf9" containerName="registry-server" containerID="cri-o://46c56bbedb55639710216735b4c3f234429c41561badee1bf1bae86f9116c973" gracePeriod=2 Feb 27 16:51:12 crc kubenswrapper[4751]: E0227 16:51:12.134035 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" containerID="f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Feb 27 16:51:12 crc kubenswrapper[4751]: E0227 16:51:12.135180 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" containerID="f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Feb 27 16:51:12 crc kubenswrapper[4751]: E0227 16:51:12.135639 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 
f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" containerID="f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Feb 27 16:51:12 crc kubenswrapper[4751]: E0227 16:51:12.135679 4751 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-frvvc" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerName="ovsdb-server" Feb 27 16:51:12 crc kubenswrapper[4751]: E0227 16:51:12.136289 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Feb 27 16:51:12 crc kubenswrapper[4751]: E0227 16:51:12.138215 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Feb 27 16:51:12 crc kubenswrapper[4751]: E0227 16:51:12.139854 4751 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Feb 27 16:51:12 crc kubenswrapper[4751]: E0227 16:51:12.139896 4751 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-frvvc" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerName="ovs-vswitchd" Feb 27 16:51:12 crc kubenswrapper[4751]: I0227 16:51:12.401758 4751 generic.go:334] "Generic (PLEG): container finished" podID="28bec30a-be40-4747-8378-a04768380bf9" containerID="46c56bbedb55639710216735b4c3f234429c41561badee1bf1bae86f9116c973" exitCode=0 Feb 27 16:51:12 crc kubenswrapper[4751]: I0227 16:51:12.401817 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z6tzc" event={"ID":"28bec30a-be40-4747-8378-a04768380bf9","Type":"ContainerDied","Data":"46c56bbedb55639710216735b4c3f234429c41561badee1bf1bae86f9116c973"} Feb 27 16:51:12 crc kubenswrapper[4751]: I0227 16:51:12.507387 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-z6tzc" Feb 27 16:51:12 crc kubenswrapper[4751]: I0227 16:51:12.543173 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28bec30a-be40-4747-8378-a04768380bf9-utilities\") pod \"28bec30a-be40-4747-8378-a04768380bf9\" (UID: \"28bec30a-be40-4747-8378-a04768380bf9\") " Feb 27 16:51:12 crc kubenswrapper[4751]: I0227 16:51:12.543263 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2jdd4\" (UniqueName: \"kubernetes.io/projected/28bec30a-be40-4747-8378-a04768380bf9-kube-api-access-2jdd4\") pod \"28bec30a-be40-4747-8378-a04768380bf9\" (UID: \"28bec30a-be40-4747-8378-a04768380bf9\") " Feb 27 16:51:12 crc kubenswrapper[4751]: I0227 16:51:12.543309 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28bec30a-be40-4747-8378-a04768380bf9-catalog-content\") pod \"28bec30a-be40-4747-8378-a04768380bf9\" (UID: \"28bec30a-be40-4747-8378-a04768380bf9\") " Feb 27 16:51:12 crc kubenswrapper[4751]: I0227 16:51:12.545166 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28bec30a-be40-4747-8378-a04768380bf9-utilities" (OuterVolumeSpecName: "utilities") pod "28bec30a-be40-4747-8378-a04768380bf9" (UID: "28bec30a-be40-4747-8378-a04768380bf9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:51:12 crc kubenswrapper[4751]: I0227 16:51:12.551016 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28bec30a-be40-4747-8378-a04768380bf9-kube-api-access-2jdd4" (OuterVolumeSpecName: "kube-api-access-2jdd4") pod "28bec30a-be40-4747-8378-a04768380bf9" (UID: "28bec30a-be40-4747-8378-a04768380bf9"). InnerVolumeSpecName "kube-api-access-2jdd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:51:12 crc kubenswrapper[4751]: I0227 16:51:12.635372 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28bec30a-be40-4747-8378-a04768380bf9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "28bec30a-be40-4747-8378-a04768380bf9" (UID: "28bec30a-be40-4747-8378-a04768380bf9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:51:12 crc kubenswrapper[4751]: I0227 16:51:12.645121 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28bec30a-be40-4747-8378-a04768380bf9-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:12 crc kubenswrapper[4751]: I0227 16:51:12.645158 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2jdd4\" (UniqueName: \"kubernetes.io/projected/28bec30a-be40-4747-8378-a04768380bf9-kube-api-access-2jdd4\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:12 crc kubenswrapper[4751]: I0227 16:51:12.645171 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28bec30a-be40-4747-8378-a04768380bf9-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:13 crc kubenswrapper[4751]: I0227 16:51:13.425594 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z6tzc" event={"ID":"28bec30a-be40-4747-8378-a04768380bf9","Type":"ContainerDied","Data":"2b861d300807bb728ec6523cd94c39a5a32a2bca34e14ddea98b4a3a0423bb46"} Feb 27 16:51:13 crc kubenswrapper[4751]: I0227 16:51:13.425691 4751 scope.go:117] "RemoveContainer" containerID="46c56bbedb55639710216735b4c3f234429c41561badee1bf1bae86f9116c973" Feb 27 16:51:13 crc kubenswrapper[4751]: I0227 16:51:13.425951 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z6tzc" Feb 27 16:51:13 crc kubenswrapper[4751]: I0227 16:51:13.566563 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z6tzc"] Feb 27 16:51:13 crc kubenswrapper[4751]: I0227 16:51:13.569738 4751 scope.go:117] "RemoveContainer" containerID="21d5c04f44494e86bcdee37882aff9059b708ab70029b24cff1949b69046c337" Feb 27 16:51:13 crc kubenswrapper[4751]: I0227 16:51:13.582500 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-z6tzc"] Feb 27 16:51:13 crc kubenswrapper[4751]: I0227 16:51:13.598486 4751 scope.go:117] "RemoveContainer" containerID="dda2b6b850ddb3c2aa1110d01637f61e822de307737721781602b51298aceb9a" Feb 27 16:51:13 crc kubenswrapper[4751]: I0227 16:51:13.980342 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-frvvc_a888fc6d-a4cc-4bc8-bca1-dafdfed15274/ovs-vswitchd/0.log" Feb 27 16:51:13 crc kubenswrapper[4751]: I0227 16:51:13.981453 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.075427 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-var-log\") pod \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.075601 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-var-run\") pod \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.075664 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-var-log" (OuterVolumeSpecName: "var-log") pod "a888fc6d-a4cc-4bc8-bca1-dafdfed15274" (UID: "a888fc6d-a4cc-4bc8-bca1-dafdfed15274"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.075708 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-srfdq\" (UniqueName: \"kubernetes.io/projected/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-kube-api-access-srfdq\") pod \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.075761 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-etc-ovs\") pod \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.075812 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-var-lib\") pod \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.075817 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-var-run" (OuterVolumeSpecName: "var-run") pod "a888fc6d-a4cc-4bc8-bca1-dafdfed15274" (UID: "a888fc6d-a4cc-4bc8-bca1-dafdfed15274"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.075872 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-etc-ovs" (OuterVolumeSpecName: "etc-ovs") pod "a888fc6d-a4cc-4bc8-bca1-dafdfed15274" (UID: "a888fc6d-a4cc-4bc8-bca1-dafdfed15274"). InnerVolumeSpecName "etc-ovs". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.075883 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-scripts\") pod \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\" (UID: \"a888fc6d-a4cc-4bc8-bca1-dafdfed15274\") " Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.075914 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-var-lib" (OuterVolumeSpecName: "var-lib") pod "a888fc6d-a4cc-4bc8-bca1-dafdfed15274" (UID: "a888fc6d-a4cc-4bc8-bca1-dafdfed15274"). InnerVolumeSpecName "var-lib". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.076248 4751 reconciler_common.go:293] "Volume detached for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-etc-ovs\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.076267 4751 reconciler_common.go:293] "Volume detached for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-var-lib\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.076278 4751 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-var-log\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.076289 4751 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-var-run\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.077040 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-scripts" (OuterVolumeSpecName: "scripts") pod "a888fc6d-a4cc-4bc8-bca1-dafdfed15274" (UID: "a888fc6d-a4cc-4bc8-bca1-dafdfed15274"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.080963 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-kube-api-access-srfdq" (OuterVolumeSpecName: "kube-api-access-srfdq") pod "a888fc6d-a4cc-4bc8-bca1-dafdfed15274" (UID: "a888fc6d-a4cc-4bc8-bca1-dafdfed15274"). InnerVolumeSpecName "kube-api-access-srfdq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.177151 4751 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.177180 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-srfdq\" (UniqueName: \"kubernetes.io/projected/a888fc6d-a4cc-4bc8-bca1-dafdfed15274-kube-api-access-srfdq\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.449452 4751 generic.go:334] "Generic (PLEG): container finished" podID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerID="5d9b075940c17ccffe3c35e09be4bf03a3f95fc97562c089e3ed06153ce12e22" exitCode=137 Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.449461 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerDied","Data":"5d9b075940c17ccffe3c35e09be4bf03a3f95fc97562c089e3ed06153ce12e22"} Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.453784 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-frvvc_a888fc6d-a4cc-4bc8-bca1-dafdfed15274/ovs-vswitchd/0.log" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.455492 4751 generic.go:334] "Generic (PLEG): container finished" podID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerID="3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9" exitCode=137 Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.455557 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-frvvc" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.455575 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-frvvc" event={"ID":"a888fc6d-a4cc-4bc8-bca1-dafdfed15274","Type":"ContainerDied","Data":"3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9"} Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.455610 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-frvvc" event={"ID":"a888fc6d-a4cc-4bc8-bca1-dafdfed15274","Type":"ContainerDied","Data":"03459f4abeec407bc2b87c5867eb9a54da4d26b721a60914c4b284d63078e548"} Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.455637 4751 scope.go:117] "RemoveContainer" containerID="3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.500425 4751 scope.go:117] "RemoveContainer" containerID="f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.501709 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-frvvc"] Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.507444 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ovs-frvvc"] Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.548937 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28bec30a-be40-4747-8378-a04768380bf9" path="/var/lib/kubelet/pods/28bec30a-be40-4747-8378-a04768380bf9/volumes" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.550717 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" 
path="/var/lib/kubelet/pods/a888fc6d-a4cc-4bc8-bca1-dafdfed15274/volumes" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.560574 4751 scope.go:117] "RemoveContainer" containerID="03c85cfe5079bb6e88c47072171664dd3c1246e3e42c5acd0c4a7e2c76c2055c" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.603672 4751 scope.go:117] "RemoveContainer" containerID="3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9" Feb 27 16:51:14 crc kubenswrapper[4751]: E0227 16:51:14.604348 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9\": container with ID starting with 3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9 not found: ID does not exist" containerID="3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.604391 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9"} err="failed to get container status \"3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9\": rpc error: code = NotFound desc = could not find container \"3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9\": container with ID starting with 3bad6cb4b6cae2b98bd92a0b135101b5b4625e382ac66a316e3aa9b5a7c43ce9 not found: ID does not exist" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.604501 4751 scope.go:117] "RemoveContainer" containerID="f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" Feb 27 16:51:14 crc kubenswrapper[4751]: E0227 16:51:14.605300 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47\": container with ID starting with f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 not found: ID does not exist" containerID="f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.605334 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47"} err="failed to get container status \"f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47\": rpc error: code = NotFound desc = could not find container \"f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47\": container with ID starting with f8ac37e845dbb7993842e845dd0da3a12bd77837ed283337b5f1a6c1fed56f47 not found: ID does not exist" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.605354 4751 scope.go:117] "RemoveContainer" containerID="03c85cfe5079bb6e88c47072171664dd3c1246e3e42c5acd0c4a7e2c76c2055c" Feb 27 16:51:14 crc kubenswrapper[4751]: E0227 16:51:14.605830 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"03c85cfe5079bb6e88c47072171664dd3c1246e3e42c5acd0c4a7e2c76c2055c\": container with ID starting with 03c85cfe5079bb6e88c47072171664dd3c1246e3e42c5acd0c4a7e2c76c2055c not found: ID does not exist" containerID="03c85cfe5079bb6e88c47072171664dd3c1246e3e42c5acd0c4a7e2c76c2055c" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.605862 4751 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"03c85cfe5079bb6e88c47072171664dd3c1246e3e42c5acd0c4a7e2c76c2055c"} err="failed to get container status \"03c85cfe5079bb6e88c47072171664dd3c1246e3e42c5acd0c4a7e2c76c2055c\": rpc error: code = NotFound desc = could not find container \"03c85cfe5079bb6e88c47072171664dd3c1246e3e42c5acd0c4a7e2c76c2055c\": container with ID starting with 03c85cfe5079bb6e88c47072171664dd3c1246e3e42c5acd0c4a7e2c76c2055c not found: ID does not exist" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.722124 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.785574 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-combined-ca-bundle\") pod \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.786023 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.786251 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-etc-swift\") pod \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.786549 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-lock\") pod \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.786816 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7zttb\" (UniqueName: \"kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-kube-api-access-7zttb\") pod \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.786934 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-cache\") pod \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\" (UID: \"2fd9f1bc-399b-4282-a2cf-b76526fcfca5\") " Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.795927 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-lock" (OuterVolumeSpecName: "lock") pod "2fd9f1bc-399b-4282-a2cf-b76526fcfca5" (UID: "2fd9f1bc-399b-4282-a2cf-b76526fcfca5"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.797922 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "2fd9f1bc-399b-4282-a2cf-b76526fcfca5" (UID: "2fd9f1bc-399b-4282-a2cf-b76526fcfca5"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.799573 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-cache" (OuterVolumeSpecName: "cache") pod "2fd9f1bc-399b-4282-a2cf-b76526fcfca5" (UID: "2fd9f1bc-399b-4282-a2cf-b76526fcfca5"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.801197 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-kube-api-access-7zttb" (OuterVolumeSpecName: "kube-api-access-7zttb") pod "2fd9f1bc-399b-4282-a2cf-b76526fcfca5" (UID: "2fd9f1bc-399b-4282-a2cf-b76526fcfca5"). InnerVolumeSpecName "kube-api-access-7zttb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.805638 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "swift") pod "2fd9f1bc-399b-4282-a2cf-b76526fcfca5" (UID: "2fd9f1bc-399b-4282-a2cf-b76526fcfca5"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.888927 4751 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.888958 4751 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-etc-swift\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.888968 4751 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-lock\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.888977 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7zttb\" (UniqueName: \"kubernetes.io/projected/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-kube-api-access-7zttb\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.888991 4751 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-cache\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.905242 4751 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Feb 27 16:51:14 crc kubenswrapper[4751]: I0227 16:51:14.990211 4751 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:15 crc kubenswrapper[4751]: I0227 16:51:15.141689 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2fd9f1bc-399b-4282-a2cf-b76526fcfca5" (UID: "2fd9f1bc-399b-4282-a2cf-b76526fcfca5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 16:51:15 crc kubenswrapper[4751]: I0227 16:51:15.193193 4751 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fd9f1bc-399b-4282-a2cf-b76526fcfca5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:15 crc kubenswrapper[4751]: I0227 16:51:15.481470 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Feb 27 16:51:15 crc kubenswrapper[4751]: I0227 16:51:15.481287 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2fd9f1bc-399b-4282-a2cf-b76526fcfca5","Type":"ContainerDied","Data":"9bf4ad609796095aa35d0e2cb9fc5024dce46f642c5dc4c0ecdf09965dcd566f"} Feb 27 16:51:15 crc kubenswrapper[4751]: I0227 16:51:15.481893 4751 scope.go:117] "RemoveContainer" containerID="5d9b075940c17ccffe3c35e09be4bf03a3f95fc97562c089e3ed06153ce12e22" Feb 27 16:51:15 crc kubenswrapper[4751]: I0227 16:51:15.519439 4751 scope.go:117] "RemoveContainer" containerID="de66ee2999fac29e22b4821b042f3b6c8bcd8af2215e1895604af96a423ffd6d" Feb 27 16:51:15 crc kubenswrapper[4751]: I0227 16:51:15.597839 4751 scope.go:117] "RemoveContainer" containerID="8d3c817e96059d70b12ef7286c0552c07aa2499059b8053476c3d15e8305625f" Feb 27 16:51:15 crc kubenswrapper[4751]: I0227 16:51:15.610595 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Feb 27 16:51:15 crc kubenswrapper[4751]: I0227 16:51:15.622116 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-storage-0"] Feb 27 16:51:15 crc kubenswrapper[4751]: I0227 16:51:15.636086 4751 scope.go:117] "RemoveContainer" containerID="83ce35274bb0792c55293b6011d40b169da183543e50dad4bd7e201d6f6a7146" Feb 27 16:51:15 crc kubenswrapper[4751]: I0227 16:51:15.666655 4751 scope.go:117] "RemoveContainer" containerID="23f3e1c514c8b828bd9ae5bb87e214f223a4588483efcabd156863e7581c145e" Feb 27 16:51:15 crc kubenswrapper[4751]: I0227 16:51:15.698021 4751 scope.go:117] "RemoveContainer" containerID="d9d8a8e1e05ef6e1c7673a877cb8fbfae2175606b7184f573f2d5f77b2fb1d24" Feb 27 16:51:15 crc kubenswrapper[4751]: I0227 16:51:15.726471 4751 scope.go:117] "RemoveContainer" containerID="4ca99524eea4a99a550fc63f10a9bcbfd3c3e4a41fa126a72fb0140d5e9d14f0" Feb 27 16:51:15 crc kubenswrapper[4751]: I0227 16:51:15.755182 4751 scope.go:117] "RemoveContainer" containerID="e79d296ac1de0b104dda270fbeebf21a4f77921165bf8426ffb98a3ab9fd68bd" Feb 27 16:51:15 crc kubenswrapper[4751]: I0227 16:51:15.792466 4751 scope.go:117] "RemoveContainer" containerID="b93427209a64b6f4800316cc95e813dbfe839f3c1cc330375575973fc2bd09ba" Feb 27 16:51:15 crc kubenswrapper[4751]: I0227 16:51:15.821799 4751 scope.go:117] "RemoveContainer" containerID="681fe80f14cb936ac1603940896731427ac9c788d645cc1ee250520ef0030927" Feb 27 16:51:15 crc kubenswrapper[4751]: I0227 16:51:15.854984 4751 scope.go:117] "RemoveContainer" containerID="3e77e6f4a377245e2e374c8bf467f0ec059e1247d85be6db7d91b8496e308e18" Feb 27 16:51:15 crc kubenswrapper[4751]: I0227 16:51:15.892633 4751 scope.go:117] "RemoveContainer" containerID="3781fd0007798258c29970e2eca3675df69d6304df681043d13fc310c53b5b2d" Feb 27 16:51:15 crc kubenswrapper[4751]: I0227 16:51:15.936070 4751 scope.go:117] "RemoveContainer" containerID="a303cad8ff9d037361da17d1a15f7f3b922522d9c253a7d490bbde1a81132839" Feb 27 16:51:15 crc kubenswrapper[4751]: I0227 16:51:15.964419 4751 scope.go:117] "RemoveContainer" 
containerID="4cafaf593ca2edd1eaf7ce55b2075b944bb67896f93d4fa2ddbe908cbb542c69" Feb 27 16:51:15 crc kubenswrapper[4751]: I0227 16:51:15.993866 4751 scope.go:117] "RemoveContainer" containerID="3d28978244e9cafd7b1ee3ade5f195d2ae28102706cdb9083ebd620acc9c5453" Feb 27 16:51:16 crc kubenswrapper[4751]: I0227 16:51:16.535990 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" path="/var/lib/kubelet/pods/2fd9f1bc-399b-4282-a2cf-b76526fcfca5/volumes" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.707940 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-p2sq4"] Feb 27 16:51:28 crc kubenswrapper[4751]: E0227 16:51:28.708912 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="object-updater" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.708927 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="object-updater" Feb 27 16:51:28 crc kubenswrapper[4751]: E0227 16:51:28.708939 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerName="ovsdb-server" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.708947 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerName="ovsdb-server" Feb 27 16:51:28 crc kubenswrapper[4751]: E0227 16:51:28.708955 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerName="ovs-vswitchd" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.708964 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerName="ovs-vswitchd" Feb 27 16:51:28 crc kubenswrapper[4751]: E0227 16:51:28.708975 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="container-replicator" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.708982 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="container-replicator" Feb 27 16:51:28 crc kubenswrapper[4751]: E0227 16:51:28.708997 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16754588-ca23-484b-b8e8-21bc94c640f3" containerName="neutron-api" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709005 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="16754588-ca23-484b-b8e8-21bc94c640f3" containerName="neutron-api" Feb 27 16:51:28 crc kubenswrapper[4751]: E0227 16:51:28.709018 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="account-auditor" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709026 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="account-auditor" Feb 27 16:51:28 crc kubenswrapper[4751]: E0227 16:51:28.709035 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="account-server" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709042 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="account-server" Feb 27 16:51:28 crc kubenswrapper[4751]: E0227 16:51:28.709053 4751 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="28bec30a-be40-4747-8378-a04768380bf9" containerName="extract-content" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709061 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="28bec30a-be40-4747-8378-a04768380bf9" containerName="extract-content" Feb 27 16:51:28 crc kubenswrapper[4751]: E0227 16:51:28.709078 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="object-expirer" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709086 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="object-expirer" Feb 27 16:51:28 crc kubenswrapper[4751]: E0227 16:51:28.709097 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="container-updater" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709104 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="container-updater" Feb 27 16:51:28 crc kubenswrapper[4751]: E0227 16:51:28.709119 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28bec30a-be40-4747-8378-a04768380bf9" containerName="registry-server" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709127 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="28bec30a-be40-4747-8378-a04768380bf9" containerName="registry-server" Feb 27 16:51:28 crc kubenswrapper[4751]: E0227 16:51:28.709142 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="swift-recon-cron" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709150 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="swift-recon-cron" Feb 27 16:51:28 crc kubenswrapper[4751]: E0227 16:51:28.709164 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="account-replicator" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709171 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="account-replicator" Feb 27 16:51:28 crc kubenswrapper[4751]: E0227 16:51:28.709181 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16754588-ca23-484b-b8e8-21bc94c640f3" containerName="neutron-httpd" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709188 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="16754588-ca23-484b-b8e8-21bc94c640f3" containerName="neutron-httpd" Feb 27 16:51:28 crc kubenswrapper[4751]: E0227 16:51:28.709200 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="object-auditor" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709207 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="object-auditor" Feb 27 16:51:28 crc kubenswrapper[4751]: E0227 16:51:28.709218 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="account-reaper" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709227 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="account-reaper" Feb 27 16:51:28 crc kubenswrapper[4751]: E0227 16:51:28.709241 4751 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="object-replicator" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709248 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="object-replicator" Feb 27 16:51:28 crc kubenswrapper[4751]: E0227 16:51:28.709260 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28bec30a-be40-4747-8378-a04768380bf9" containerName="extract-utilities" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709268 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="28bec30a-be40-4747-8378-a04768380bf9" containerName="extract-utilities" Feb 27 16:51:28 crc kubenswrapper[4751]: E0227 16:51:28.709284 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="object-server" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709291 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="object-server" Feb 27 16:51:28 crc kubenswrapper[4751]: E0227 16:51:28.709302 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="rsync" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709310 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="rsync" Feb 27 16:51:28 crc kubenswrapper[4751]: E0227 16:51:28.709322 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerName="ovsdb-server-init" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709330 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerName="ovsdb-server-init" Feb 27 16:51:28 crc kubenswrapper[4751]: E0227 16:51:28.709337 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="container-server" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709344 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="container-server" Feb 27 16:51:28 crc kubenswrapper[4751]: E0227 16:51:28.709352 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="container-auditor" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709361 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="container-auditor" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709708 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="container-server" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709735 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="container-updater" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709754 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="object-auditor" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709775 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="object-updater" Feb 27 16:51:28 crc 
kubenswrapper[4751]: I0227 16:51:28.709788 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="account-auditor" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709800 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="rsync" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709816 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerName="ovs-vswitchd" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709828 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="object-expirer" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709839 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="account-reaper" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709848 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="container-replicator" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709862 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="container-auditor" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709875 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="a888fc6d-a4cc-4bc8-bca1-dafdfed15274" containerName="ovsdb-server" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709884 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="object-server" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709901 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="swift-recon-cron" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709913 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="account-replicator" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709922 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="16754588-ca23-484b-b8e8-21bc94c640f3" containerName="neutron-httpd" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709933 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="object-replicator" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709946 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="16754588-ca23-484b-b8e8-21bc94c640f3" containerName="neutron-api" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709956 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="28bec30a-be40-4747-8378-a04768380bf9" containerName="registry-server" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.709967 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fd9f1bc-399b-4282-a2cf-b76526fcfca5" containerName="account-server" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.711256 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p2sq4" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.736110 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p2sq4"] Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.819354 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9475b238-c91e-41c9-b563-faf0f2fa0602-catalog-content\") pod \"redhat-marketplace-p2sq4\" (UID: \"9475b238-c91e-41c9-b563-faf0f2fa0602\") " pod="openshift-marketplace/redhat-marketplace-p2sq4" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.819536 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7psj2\" (UniqueName: \"kubernetes.io/projected/9475b238-c91e-41c9-b563-faf0f2fa0602-kube-api-access-7psj2\") pod \"redhat-marketplace-p2sq4\" (UID: \"9475b238-c91e-41c9-b563-faf0f2fa0602\") " pod="openshift-marketplace/redhat-marketplace-p2sq4" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.819618 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9475b238-c91e-41c9-b563-faf0f2fa0602-utilities\") pod \"redhat-marketplace-p2sq4\" (UID: \"9475b238-c91e-41c9-b563-faf0f2fa0602\") " pod="openshift-marketplace/redhat-marketplace-p2sq4" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.920591 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7psj2\" (UniqueName: \"kubernetes.io/projected/9475b238-c91e-41c9-b563-faf0f2fa0602-kube-api-access-7psj2\") pod \"redhat-marketplace-p2sq4\" (UID: \"9475b238-c91e-41c9-b563-faf0f2fa0602\") " pod="openshift-marketplace/redhat-marketplace-p2sq4" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.920671 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9475b238-c91e-41c9-b563-faf0f2fa0602-utilities\") pod \"redhat-marketplace-p2sq4\" (UID: \"9475b238-c91e-41c9-b563-faf0f2fa0602\") " pod="openshift-marketplace/redhat-marketplace-p2sq4" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.920784 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9475b238-c91e-41c9-b563-faf0f2fa0602-catalog-content\") pod \"redhat-marketplace-p2sq4\" (UID: \"9475b238-c91e-41c9-b563-faf0f2fa0602\") " pod="openshift-marketplace/redhat-marketplace-p2sq4" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.921515 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9475b238-c91e-41c9-b563-faf0f2fa0602-catalog-content\") pod \"redhat-marketplace-p2sq4\" (UID: \"9475b238-c91e-41c9-b563-faf0f2fa0602\") " pod="openshift-marketplace/redhat-marketplace-p2sq4" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.921521 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9475b238-c91e-41c9-b563-faf0f2fa0602-utilities\") pod \"redhat-marketplace-p2sq4\" (UID: \"9475b238-c91e-41c9-b563-faf0f2fa0602\") " pod="openshift-marketplace/redhat-marketplace-p2sq4" Feb 27 16:51:28 crc kubenswrapper[4751]: I0227 16:51:28.946252 4751 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-7psj2\" (UniqueName: \"kubernetes.io/projected/9475b238-c91e-41c9-b563-faf0f2fa0602-kube-api-access-7psj2\") pod \"redhat-marketplace-p2sq4\" (UID: \"9475b238-c91e-41c9-b563-faf0f2fa0602\") " pod="openshift-marketplace/redhat-marketplace-p2sq4" Feb 27 16:51:29 crc kubenswrapper[4751]: I0227 16:51:29.037658 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p2sq4" Feb 27 16:51:29 crc kubenswrapper[4751]: I0227 16:51:29.534933 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p2sq4"] Feb 27 16:51:29 crc kubenswrapper[4751]: W0227 16:51:29.545145 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9475b238_c91e_41c9_b563_faf0f2fa0602.slice/crio-7e68fb1c7f67ad8af59933d0dbd8405053a9d6f876a4100aef72a3ea8c7338d9 WatchSource:0}: Error finding container 7e68fb1c7f67ad8af59933d0dbd8405053a9d6f876a4100aef72a3ea8c7338d9: Status 404 returned error can't find the container with id 7e68fb1c7f67ad8af59933d0dbd8405053a9d6f876a4100aef72a3ea8c7338d9 Feb 27 16:51:29 crc kubenswrapper[4751]: I0227 16:51:29.673258 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2sq4" event={"ID":"9475b238-c91e-41c9-b563-faf0f2fa0602","Type":"ContainerStarted","Data":"7e68fb1c7f67ad8af59933d0dbd8405053a9d6f876a4100aef72a3ea8c7338d9"} Feb 27 16:51:30 crc kubenswrapper[4751]: I0227 16:51:30.685783 4751 generic.go:334] "Generic (PLEG): container finished" podID="9475b238-c91e-41c9-b563-faf0f2fa0602" containerID="290cbdecb3c5df486a91a85354f892847779b98241a07dcfd6ead883c735d19d" exitCode=0 Feb 27 16:51:30 crc kubenswrapper[4751]: I0227 16:51:30.685828 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2sq4" event={"ID":"9475b238-c91e-41c9-b563-faf0f2fa0602","Type":"ContainerDied","Data":"290cbdecb3c5df486a91a85354f892847779b98241a07dcfd6ead883c735d19d"} Feb 27 16:51:32 crc kubenswrapper[4751]: I0227 16:51:32.711552 4751 generic.go:334] "Generic (PLEG): container finished" podID="9475b238-c91e-41c9-b563-faf0f2fa0602" containerID="963ba17b513fafdaf48e9dca842f12b2f5cf9e3f08598713676de42fdf98ec8b" exitCode=0 Feb 27 16:51:32 crc kubenswrapper[4751]: I0227 16:51:32.711676 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2sq4" event={"ID":"9475b238-c91e-41c9-b563-faf0f2fa0602","Type":"ContainerDied","Data":"963ba17b513fafdaf48e9dca842f12b2f5cf9e3f08598713676de42fdf98ec8b"} Feb 27 16:51:33 crc kubenswrapper[4751]: I0227 16:51:33.724935 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2sq4" event={"ID":"9475b238-c91e-41c9-b563-faf0f2fa0602","Type":"ContainerStarted","Data":"90d5478766fa06ce319ef6fdff01d25f6e76430acabd5bd9bb918007d2939f8d"} Feb 27 16:51:33 crc kubenswrapper[4751]: I0227 16:51:33.754951 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-p2sq4" podStartSLOduration=3.278475086 podStartE2EDuration="5.754927365s" podCreationTimestamp="2026-02-27 16:51:28 +0000 UTC" firstStartedPulling="2026-02-27 16:51:30.688007578 +0000 UTC m=+1652.835022025" lastFinishedPulling="2026-02-27 16:51:33.164459857 +0000 UTC m=+1655.311474304" observedRunningTime="2026-02-27 16:51:33.751165535 +0000 UTC m=+1655.898179992" 
watchObservedRunningTime="2026-02-27 16:51:33.754927365 +0000 UTC m=+1655.901941822" Feb 27 16:51:39 crc kubenswrapper[4751]: I0227 16:51:39.037872 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-p2sq4" Feb 27 16:51:39 crc kubenswrapper[4751]: I0227 16:51:39.038853 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-p2sq4" Feb 27 16:51:39 crc kubenswrapper[4751]: I0227 16:51:39.107927 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-p2sq4" Feb 27 16:51:39 crc kubenswrapper[4751]: I0227 16:51:39.837231 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-p2sq4" Feb 27 16:51:39 crc kubenswrapper[4751]: I0227 16:51:39.906845 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p2sq4"] Feb 27 16:51:41 crc kubenswrapper[4751]: I0227 16:51:41.939339 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-p2sq4" podUID="9475b238-c91e-41c9-b563-faf0f2fa0602" containerName="registry-server" containerID="cri-o://90d5478766fa06ce319ef6fdff01d25f6e76430acabd5bd9bb918007d2939f8d" gracePeriod=2 Feb 27 16:51:42 crc kubenswrapper[4751]: I0227 16:51:42.538579 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p2sq4" Feb 27 16:51:42 crc kubenswrapper[4751]: I0227 16:51:42.632013 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9475b238-c91e-41c9-b563-faf0f2fa0602-catalog-content\") pod \"9475b238-c91e-41c9-b563-faf0f2fa0602\" (UID: \"9475b238-c91e-41c9-b563-faf0f2fa0602\") " Feb 27 16:51:42 crc kubenswrapper[4751]: I0227 16:51:42.632186 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7psj2\" (UniqueName: \"kubernetes.io/projected/9475b238-c91e-41c9-b563-faf0f2fa0602-kube-api-access-7psj2\") pod \"9475b238-c91e-41c9-b563-faf0f2fa0602\" (UID: \"9475b238-c91e-41c9-b563-faf0f2fa0602\") " Feb 27 16:51:42 crc kubenswrapper[4751]: I0227 16:51:42.632269 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9475b238-c91e-41c9-b563-faf0f2fa0602-utilities\") pod \"9475b238-c91e-41c9-b563-faf0f2fa0602\" (UID: \"9475b238-c91e-41c9-b563-faf0f2fa0602\") " Feb 27 16:51:42 crc kubenswrapper[4751]: I0227 16:51:42.634765 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9475b238-c91e-41c9-b563-faf0f2fa0602-utilities" (OuterVolumeSpecName: "utilities") pod "9475b238-c91e-41c9-b563-faf0f2fa0602" (UID: "9475b238-c91e-41c9-b563-faf0f2fa0602"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:51:42 crc kubenswrapper[4751]: I0227 16:51:42.642819 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9475b238-c91e-41c9-b563-faf0f2fa0602-kube-api-access-7psj2" (OuterVolumeSpecName: "kube-api-access-7psj2") pod "9475b238-c91e-41c9-b563-faf0f2fa0602" (UID: "9475b238-c91e-41c9-b563-faf0f2fa0602"). InnerVolumeSpecName "kube-api-access-7psj2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:51:42 crc kubenswrapper[4751]: I0227 16:51:42.734907 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9475b238-c91e-41c9-b563-faf0f2fa0602-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:42 crc kubenswrapper[4751]: I0227 16:51:42.734954 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7psj2\" (UniqueName: \"kubernetes.io/projected/9475b238-c91e-41c9-b563-faf0f2fa0602-kube-api-access-7psj2\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:42 crc kubenswrapper[4751]: I0227 16:51:42.921852 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9475b238-c91e-41c9-b563-faf0f2fa0602-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9475b238-c91e-41c9-b563-faf0f2fa0602" (UID: "9475b238-c91e-41c9-b563-faf0f2fa0602"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:51:42 crc kubenswrapper[4751]: I0227 16:51:42.938197 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9475b238-c91e-41c9-b563-faf0f2fa0602-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 16:51:42 crc kubenswrapper[4751]: I0227 16:51:42.953330 4751 generic.go:334] "Generic (PLEG): container finished" podID="9475b238-c91e-41c9-b563-faf0f2fa0602" containerID="90d5478766fa06ce319ef6fdff01d25f6e76430acabd5bd9bb918007d2939f8d" exitCode=0 Feb 27 16:51:42 crc kubenswrapper[4751]: I0227 16:51:42.953440 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2sq4" event={"ID":"9475b238-c91e-41c9-b563-faf0f2fa0602","Type":"ContainerDied","Data":"90d5478766fa06ce319ef6fdff01d25f6e76430acabd5bd9bb918007d2939f8d"} Feb 27 16:51:42 crc kubenswrapper[4751]: I0227 16:51:42.953567 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2sq4" event={"ID":"9475b238-c91e-41c9-b563-faf0f2fa0602","Type":"ContainerDied","Data":"7e68fb1c7f67ad8af59933d0dbd8405053a9d6f876a4100aef72a3ea8c7338d9"} Feb 27 16:51:42 crc kubenswrapper[4751]: I0227 16:51:42.953482 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p2sq4" Feb 27 16:51:42 crc kubenswrapper[4751]: I0227 16:51:42.953631 4751 scope.go:117] "RemoveContainer" containerID="90d5478766fa06ce319ef6fdff01d25f6e76430acabd5bd9bb918007d2939f8d" Feb 27 16:51:42 crc kubenswrapper[4751]: I0227 16:51:42.995167 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p2sq4"] Feb 27 16:51:43 crc kubenswrapper[4751]: I0227 16:51:43.009350 4751 scope.go:117] "RemoveContainer" containerID="963ba17b513fafdaf48e9dca842f12b2f5cf9e3f08598713676de42fdf98ec8b" Feb 27 16:51:43 crc kubenswrapper[4751]: I0227 16:51:43.015138 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-p2sq4"] Feb 27 16:51:43 crc kubenswrapper[4751]: I0227 16:51:43.042556 4751 scope.go:117] "RemoveContainer" containerID="290cbdecb3c5df486a91a85354f892847779b98241a07dcfd6ead883c735d19d" Feb 27 16:51:43 crc kubenswrapper[4751]: I0227 16:51:43.074161 4751 scope.go:117] "RemoveContainer" containerID="90d5478766fa06ce319ef6fdff01d25f6e76430acabd5bd9bb918007d2939f8d" Feb 27 16:51:43 crc kubenswrapper[4751]: E0227 16:51:43.074702 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"90d5478766fa06ce319ef6fdff01d25f6e76430acabd5bd9bb918007d2939f8d\": container with ID starting with 90d5478766fa06ce319ef6fdff01d25f6e76430acabd5bd9bb918007d2939f8d not found: ID does not exist" containerID="90d5478766fa06ce319ef6fdff01d25f6e76430acabd5bd9bb918007d2939f8d" Feb 27 16:51:43 crc kubenswrapper[4751]: I0227 16:51:43.074745 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90d5478766fa06ce319ef6fdff01d25f6e76430acabd5bd9bb918007d2939f8d"} err="failed to get container status \"90d5478766fa06ce319ef6fdff01d25f6e76430acabd5bd9bb918007d2939f8d\": rpc error: code = NotFound desc = could not find container \"90d5478766fa06ce319ef6fdff01d25f6e76430acabd5bd9bb918007d2939f8d\": container with ID starting with 90d5478766fa06ce319ef6fdff01d25f6e76430acabd5bd9bb918007d2939f8d not found: ID does not exist" Feb 27 16:51:43 crc kubenswrapper[4751]: I0227 16:51:43.074778 4751 scope.go:117] "RemoveContainer" containerID="963ba17b513fafdaf48e9dca842f12b2f5cf9e3f08598713676de42fdf98ec8b" Feb 27 16:51:43 crc kubenswrapper[4751]: E0227 16:51:43.075355 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"963ba17b513fafdaf48e9dca842f12b2f5cf9e3f08598713676de42fdf98ec8b\": container with ID starting with 963ba17b513fafdaf48e9dca842f12b2f5cf9e3f08598713676de42fdf98ec8b not found: ID does not exist" containerID="963ba17b513fafdaf48e9dca842f12b2f5cf9e3f08598713676de42fdf98ec8b" Feb 27 16:51:43 crc kubenswrapper[4751]: I0227 16:51:43.075383 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"963ba17b513fafdaf48e9dca842f12b2f5cf9e3f08598713676de42fdf98ec8b"} err="failed to get container status \"963ba17b513fafdaf48e9dca842f12b2f5cf9e3f08598713676de42fdf98ec8b\": rpc error: code = NotFound desc = could not find container \"963ba17b513fafdaf48e9dca842f12b2f5cf9e3f08598713676de42fdf98ec8b\": container with ID starting with 963ba17b513fafdaf48e9dca842f12b2f5cf9e3f08598713676de42fdf98ec8b not found: ID does not exist" Feb 27 16:51:43 crc kubenswrapper[4751]: I0227 16:51:43.075476 4751 scope.go:117] "RemoveContainer" 
containerID="290cbdecb3c5df486a91a85354f892847779b98241a07dcfd6ead883c735d19d" Feb 27 16:51:43 crc kubenswrapper[4751]: E0227 16:51:43.075937 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"290cbdecb3c5df486a91a85354f892847779b98241a07dcfd6ead883c735d19d\": container with ID starting with 290cbdecb3c5df486a91a85354f892847779b98241a07dcfd6ead883c735d19d not found: ID does not exist" containerID="290cbdecb3c5df486a91a85354f892847779b98241a07dcfd6ead883c735d19d" Feb 27 16:51:43 crc kubenswrapper[4751]: I0227 16:51:43.075993 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"290cbdecb3c5df486a91a85354f892847779b98241a07dcfd6ead883c735d19d"} err="failed to get container status \"290cbdecb3c5df486a91a85354f892847779b98241a07dcfd6ead883c735d19d\": rpc error: code = NotFound desc = could not find container \"290cbdecb3c5df486a91a85354f892847779b98241a07dcfd6ead883c735d19d\": container with ID starting with 290cbdecb3c5df486a91a85354f892847779b98241a07dcfd6ead883c735d19d not found: ID does not exist" Feb 27 16:51:44 crc kubenswrapper[4751]: I0227 16:51:44.531162 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9475b238-c91e-41c9-b563-faf0f2fa0602" path="/var/lib/kubelet/pods/9475b238-c91e-41c9-b563-faf0f2fa0602/volumes" Feb 27 16:52:00 crc kubenswrapper[4751]: I0227 16:52:00.164427 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536852-v2d8d"] Feb 27 16:52:00 crc kubenswrapper[4751]: E0227 16:52:00.166978 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9475b238-c91e-41c9-b563-faf0f2fa0602" containerName="extract-utilities" Feb 27 16:52:00 crc kubenswrapper[4751]: I0227 16:52:00.167020 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="9475b238-c91e-41c9-b563-faf0f2fa0602" containerName="extract-utilities" Feb 27 16:52:00 crc kubenswrapper[4751]: E0227 16:52:00.167054 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9475b238-c91e-41c9-b563-faf0f2fa0602" containerName="registry-server" Feb 27 16:52:00 crc kubenswrapper[4751]: I0227 16:52:00.167068 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="9475b238-c91e-41c9-b563-faf0f2fa0602" containerName="registry-server" Feb 27 16:52:00 crc kubenswrapper[4751]: E0227 16:52:00.167104 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9475b238-c91e-41c9-b563-faf0f2fa0602" containerName="extract-content" Feb 27 16:52:00 crc kubenswrapper[4751]: I0227 16:52:00.167117 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="9475b238-c91e-41c9-b563-faf0f2fa0602" containerName="extract-content" Feb 27 16:52:00 crc kubenswrapper[4751]: I0227 16:52:00.167388 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="9475b238-c91e-41c9-b563-faf0f2fa0602" containerName="registry-server" Feb 27 16:52:00 crc kubenswrapper[4751]: I0227 16:52:00.168115 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536852-v2d8d" Feb 27 16:52:00 crc kubenswrapper[4751]: I0227 16:52:00.172111 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 16:52:00 crc kubenswrapper[4751]: I0227 16:52:00.172162 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 16:52:00 crc kubenswrapper[4751]: I0227 16:52:00.172243 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536852-v2d8d"] Feb 27 16:52:00 crc kubenswrapper[4751]: I0227 16:52:00.172121 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 16:52:00 crc kubenswrapper[4751]: I0227 16:52:00.254076 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bmjdn\" (UniqueName: \"kubernetes.io/projected/a97ead17-2e26-4c04-bfda-fa3646505664-kube-api-access-bmjdn\") pod \"auto-csr-approver-29536852-v2d8d\" (UID: \"a97ead17-2e26-4c04-bfda-fa3646505664\") " pod="openshift-infra/auto-csr-approver-29536852-v2d8d" Feb 27 16:52:00 crc kubenswrapper[4751]: I0227 16:52:00.356596 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bmjdn\" (UniqueName: \"kubernetes.io/projected/a97ead17-2e26-4c04-bfda-fa3646505664-kube-api-access-bmjdn\") pod \"auto-csr-approver-29536852-v2d8d\" (UID: \"a97ead17-2e26-4c04-bfda-fa3646505664\") " pod="openshift-infra/auto-csr-approver-29536852-v2d8d" Feb 27 16:52:00 crc kubenswrapper[4751]: I0227 16:52:00.380317 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bmjdn\" (UniqueName: \"kubernetes.io/projected/a97ead17-2e26-4c04-bfda-fa3646505664-kube-api-access-bmjdn\") pod \"auto-csr-approver-29536852-v2d8d\" (UID: \"a97ead17-2e26-4c04-bfda-fa3646505664\") " pod="openshift-infra/auto-csr-approver-29536852-v2d8d" Feb 27 16:52:00 crc kubenswrapper[4751]: I0227 16:52:00.502938 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536852-v2d8d" Feb 27 16:52:01 crc kubenswrapper[4751]: I0227 16:52:01.029048 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536852-v2d8d"] Feb 27 16:52:01 crc kubenswrapper[4751]: W0227 16:52:01.037867 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda97ead17_2e26_4c04_bfda_fa3646505664.slice/crio-15d9335bc51440822e7a02bb11969109c4fbd91ac75810165ca1b826cd392280 WatchSource:0}: Error finding container 15d9335bc51440822e7a02bb11969109c4fbd91ac75810165ca1b826cd392280: Status 404 returned error can't find the container with id 15d9335bc51440822e7a02bb11969109c4fbd91ac75810165ca1b826cd392280 Feb 27 16:52:01 crc kubenswrapper[4751]: I0227 16:52:01.174678 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536852-v2d8d" event={"ID":"a97ead17-2e26-4c04-bfda-fa3646505664","Type":"ContainerStarted","Data":"15d9335bc51440822e7a02bb11969109c4fbd91ac75810165ca1b826cd392280"} Feb 27 16:52:03 crc kubenswrapper[4751]: I0227 16:52:03.200932 4751 generic.go:334] "Generic (PLEG): container finished" podID="a97ead17-2e26-4c04-bfda-fa3646505664" containerID="e33f8dcfabad6257dca49dcc012b78566d47a9e5059878b90ff84ffe841eb9ca" exitCode=0 Feb 27 16:52:03 crc kubenswrapper[4751]: I0227 16:52:03.201153 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536852-v2d8d" event={"ID":"a97ead17-2e26-4c04-bfda-fa3646505664","Type":"ContainerDied","Data":"e33f8dcfabad6257dca49dcc012b78566d47a9e5059878b90ff84ffe841eb9ca"} Feb 27 16:52:04 crc kubenswrapper[4751]: I0227 16:52:04.567846 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536852-v2d8d" Feb 27 16:52:04 crc kubenswrapper[4751]: I0227 16:52:04.725309 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bmjdn\" (UniqueName: \"kubernetes.io/projected/a97ead17-2e26-4c04-bfda-fa3646505664-kube-api-access-bmjdn\") pod \"a97ead17-2e26-4c04-bfda-fa3646505664\" (UID: \"a97ead17-2e26-4c04-bfda-fa3646505664\") " Feb 27 16:52:04 crc kubenswrapper[4751]: I0227 16:52:04.734030 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a97ead17-2e26-4c04-bfda-fa3646505664-kube-api-access-bmjdn" (OuterVolumeSpecName: "kube-api-access-bmjdn") pod "a97ead17-2e26-4c04-bfda-fa3646505664" (UID: "a97ead17-2e26-4c04-bfda-fa3646505664"). InnerVolumeSpecName "kube-api-access-bmjdn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:52:04 crc kubenswrapper[4751]: I0227 16:52:04.828076 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bmjdn\" (UniqueName: \"kubernetes.io/projected/a97ead17-2e26-4c04-bfda-fa3646505664-kube-api-access-bmjdn\") on node \"crc\" DevicePath \"\"" Feb 27 16:52:05 crc kubenswrapper[4751]: I0227 16:52:05.222618 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536852-v2d8d" event={"ID":"a97ead17-2e26-4c04-bfda-fa3646505664","Type":"ContainerDied","Data":"15d9335bc51440822e7a02bb11969109c4fbd91ac75810165ca1b826cd392280"} Feb 27 16:52:05 crc kubenswrapper[4751]: I0227 16:52:05.222679 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="15d9335bc51440822e7a02bb11969109c4fbd91ac75810165ca1b826cd392280" Feb 27 16:52:05 crc kubenswrapper[4751]: I0227 16:52:05.222685 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536852-v2d8d" Feb 27 16:52:05 crc kubenswrapper[4751]: I0227 16:52:05.687841 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536846-d7tbv"] Feb 27 16:52:05 crc kubenswrapper[4751]: I0227 16:52:05.702499 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536846-d7tbv"] Feb 27 16:52:06 crc kubenswrapper[4751]: I0227 16:52:06.538186 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ff1d6b8-692b-4578-91a6-69cda33c3b50" path="/var/lib/kubelet/pods/9ff1d6b8-692b-4578-91a6-69cda33c3b50/volumes" Feb 27 16:52:28 crc kubenswrapper[4751]: I0227 16:52:28.918182 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 16:52:28 crc kubenswrapper[4751]: I0227 16:52:28.918815 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:52:46 crc kubenswrapper[4751]: I0227 16:52:46.689249 4751 scope.go:117] "RemoveContainer" containerID="3fd7fd20d2274bc9adfa8baf3cba840811104b1d4341f4424d646b93404725f3" Feb 27 16:52:46 crc kubenswrapper[4751]: I0227 16:52:46.737727 4751 scope.go:117] "RemoveContainer" containerID="897975726e880be0fc1b35ec78227f61767a489580c895d0ac07a1b1d7e35666" Feb 27 16:52:46 crc kubenswrapper[4751]: I0227 16:52:46.759431 4751 scope.go:117] "RemoveContainer" containerID="8be581f350c0a6c6b3215f93ef0de633555618e7b76dc24371e057e35a5fd5fa" Feb 27 16:52:46 crc kubenswrapper[4751]: I0227 16:52:46.782245 4751 scope.go:117] "RemoveContainer" containerID="140b8fc7115efd7456e187dc7e0388dc53edef9eb6452f05d03914250c59cc20" Feb 27 16:52:46 crc kubenswrapper[4751]: I0227 16:52:46.811118 4751 scope.go:117] "RemoveContainer" containerID="fd3db8fe83eca043a38528e17fcc22e812b9c08be75ab024119f3b8e186ffc32" Feb 27 16:52:46 crc kubenswrapper[4751]: I0227 16:52:46.828488 4751 scope.go:117] "RemoveContainer" containerID="630f3c9c80ad82fb5029211f5ab0717c1e38f33af63ff70a36579e7512c954e1" Feb 27 16:52:46 crc kubenswrapper[4751]: I0227 
16:52:46.851101 4751 scope.go:117] "RemoveContainer" containerID="62783f7a506513bf1395f3c504deef531a9030d5bf80991511d067dd84478217" Feb 27 16:52:46 crc kubenswrapper[4751]: I0227 16:52:46.878511 4751 scope.go:117] "RemoveContainer" containerID="5257b7d6460134c8a83b3fbef89afd3c3881dadf022442936951b53d49f7804d" Feb 27 16:52:46 crc kubenswrapper[4751]: I0227 16:52:46.898270 4751 scope.go:117] "RemoveContainer" containerID="3d08ca10ddfe45db584f322c992bf5201be412cd465225bf79f5f459fc3b794d" Feb 27 16:52:46 crc kubenswrapper[4751]: I0227 16:52:46.913988 4751 scope.go:117] "RemoveContainer" containerID="6a73879796cab2d47e005fea1c9dbd41c8bda0273883d4f118c4924ab89c54e2" Feb 27 16:52:46 crc kubenswrapper[4751]: I0227 16:52:46.931602 4751 scope.go:117] "RemoveContainer" containerID="e44087f79ab11f751ff8eb2bfe6cb9757dd52869fdc3c821d99e9e86a32e5f6f" Feb 27 16:52:46 crc kubenswrapper[4751]: I0227 16:52:46.963626 4751 scope.go:117] "RemoveContainer" containerID="65d85414151eefbb245f3b07b64f200b65b4e9916a138f81be1aac50636a36f4" Feb 27 16:52:46 crc kubenswrapper[4751]: I0227 16:52:46.978671 4751 scope.go:117] "RemoveContainer" containerID="c2205f05f4dd6feb20fe78fb9130808228569fa6677a436be7acc32d06637459" Feb 27 16:52:46 crc kubenswrapper[4751]: I0227 16:52:46.993360 4751 scope.go:117] "RemoveContainer" containerID="e69c7cd9f46c7fcbc615d5c40b4025a63c73fd3c765fbd8afc93ab358c87eb2a" Feb 27 16:52:47 crc kubenswrapper[4751]: I0227 16:52:47.019141 4751 scope.go:117] "RemoveContainer" containerID="090f901a8ba3dcff42ed05328e63600b87dc4236801a1d7eac200161a8114a4b" Feb 27 16:52:47 crc kubenswrapper[4751]: I0227 16:52:47.036079 4751 scope.go:117] "RemoveContainer" containerID="0ae2f7b56352c4a7cdff2127e705d4c179feca93ded8ed875ae44c12fa5c38ae" Feb 27 16:52:47 crc kubenswrapper[4751]: I0227 16:52:47.052152 4751 scope.go:117] "RemoveContainer" containerID="a6ae7a3398231202d27c74508110644707211b970edd59282375775aa71a40be" Feb 27 16:52:47 crc kubenswrapper[4751]: I0227 16:52:47.066976 4751 scope.go:117] "RemoveContainer" containerID="8d7ec819993aafb99d822867ec58a5121e57f592645ec15f5a71d216c630dc8b" Feb 27 16:52:47 crc kubenswrapper[4751]: I0227 16:52:47.081007 4751 scope.go:117] "RemoveContainer" containerID="25ba2a6628ec1ee4d5b76b5402988f5b6e113ebdefb932264b55ca80456079a1" Feb 27 16:52:47 crc kubenswrapper[4751]: I0227 16:52:47.108933 4751 scope.go:117] "RemoveContainer" containerID="d07078ead15e772dd400fc2fab10411c832674de9a150659d237e367c26a856a" Feb 27 16:52:58 crc kubenswrapper[4751]: I0227 16:52:58.918157 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 16:52:58 crc kubenswrapper[4751]: I0227 16:52:58.919033 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:53:28 crc kubenswrapper[4751]: I0227 16:53:28.918501 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Feb 27 16:53:28 crc kubenswrapper[4751]: I0227 16:53:28.919040 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 16:53:28 crc kubenswrapper[4751]: I0227 16:53:28.919101 4751 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 16:53:28 crc kubenswrapper[4751]: I0227 16:53:28.919913 4751 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0"} pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 27 16:53:28 crc kubenswrapper[4751]: I0227 16:53:28.919986 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" containerID="cri-o://1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" gracePeriod=600 Feb 27 16:53:29 crc kubenswrapper[4751]: E0227 16:53:29.652484 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:53:30 crc kubenswrapper[4751]: I0227 16:53:30.128664 4751 generic.go:334] "Generic (PLEG): container finished" podID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" exitCode=0 Feb 27 16:53:30 crc kubenswrapper[4751]: I0227 16:53:30.128731 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerDied","Data":"1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0"} Feb 27 16:53:30 crc kubenswrapper[4751]: I0227 16:53:30.128788 4751 scope.go:117] "RemoveContainer" containerID="62d29c4bd042871716a930e4ba973dc2a54787adada169c002b5efb7ee6d0c17" Feb 27 16:53:30 crc kubenswrapper[4751]: I0227 16:53:30.129367 4751 scope.go:117] "RemoveContainer" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 16:53:30 crc kubenswrapper[4751]: E0227 16:53:30.129745 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:53:41 crc kubenswrapper[4751]: I0227 16:53:41.914574 4751 scope.go:117] "RemoveContainer" 
containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 16:53:41 crc kubenswrapper[4751]: E0227 16:53:41.916372 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:53:47 crc kubenswrapper[4751]: I0227 16:53:47.417901 4751 scope.go:117] "RemoveContainer" containerID="bb211c8ba53260784d253920b3c61cb5553b9d7118a710edd1a842fc4533fd91" Feb 27 16:53:47 crc kubenswrapper[4751]: I0227 16:53:47.483019 4751 scope.go:117] "RemoveContainer" containerID="eceb2023fadf1f97b7f28b5686582ed93f0d37ed691285c381c8b92e120a66bd" Feb 27 16:53:47 crc kubenswrapper[4751]: I0227 16:53:47.522137 4751 scope.go:117] "RemoveContainer" containerID="0f1b9d467c3c6be9ba6bc745147b076f8155ad05ceee660126b0cad365625a05" Feb 27 16:53:47 crc kubenswrapper[4751]: I0227 16:53:47.558595 4751 scope.go:117] "RemoveContainer" containerID="b7da592a3fd5743b745f60dd492e79d806ef290d43c2abc47fe9b2e656118c00" Feb 27 16:53:47 crc kubenswrapper[4751]: I0227 16:53:47.599344 4751 scope.go:117] "RemoveContainer" containerID="8b4d0f68e6478929263c6f04f2a21d33b3783a4100e3d80a0dd129b6b5595f24" Feb 27 16:53:47 crc kubenswrapper[4751]: I0227 16:53:47.645786 4751 scope.go:117] "RemoveContainer" containerID="ccf6a1800c30538fc57cee8fb0f6c11241c62e6a5663f569cec7197d8da7be0d" Feb 27 16:53:56 crc kubenswrapper[4751]: I0227 16:53:56.521309 4751 scope.go:117] "RemoveContainer" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 16:53:56 crc kubenswrapper[4751]: E0227 16:53:56.522123 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:54:00 crc kubenswrapper[4751]: I0227 16:54:00.157956 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536854-2ml7s"] Feb 27 16:54:00 crc kubenswrapper[4751]: E0227 16:54:00.161827 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a97ead17-2e26-4c04-bfda-fa3646505664" containerName="oc" Feb 27 16:54:00 crc kubenswrapper[4751]: I0227 16:54:00.161850 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="a97ead17-2e26-4c04-bfda-fa3646505664" containerName="oc" Feb 27 16:54:00 crc kubenswrapper[4751]: I0227 16:54:00.162089 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="a97ead17-2e26-4c04-bfda-fa3646505664" containerName="oc" Feb 27 16:54:00 crc kubenswrapper[4751]: I0227 16:54:00.162693 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536854-2ml7s" Feb 27 16:54:00 crc kubenswrapper[4751]: I0227 16:54:00.165873 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 16:54:00 crc kubenswrapper[4751]: I0227 16:54:00.167981 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 16:54:00 crc kubenswrapper[4751]: I0227 16:54:00.169458 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 16:54:00 crc kubenswrapper[4751]: I0227 16:54:00.171339 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536854-2ml7s"] Feb 27 16:54:00 crc kubenswrapper[4751]: I0227 16:54:00.355810 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nzz6\" (UniqueName: \"kubernetes.io/projected/b41c6f8b-2a1f-4b76-8343-4907b52fb82c-kube-api-access-7nzz6\") pod \"auto-csr-approver-29536854-2ml7s\" (UID: \"b41c6f8b-2a1f-4b76-8343-4907b52fb82c\") " pod="openshift-infra/auto-csr-approver-29536854-2ml7s" Feb 27 16:54:00 crc kubenswrapper[4751]: I0227 16:54:00.457488 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nzz6\" (UniqueName: \"kubernetes.io/projected/b41c6f8b-2a1f-4b76-8343-4907b52fb82c-kube-api-access-7nzz6\") pod \"auto-csr-approver-29536854-2ml7s\" (UID: \"b41c6f8b-2a1f-4b76-8343-4907b52fb82c\") " pod="openshift-infra/auto-csr-approver-29536854-2ml7s" Feb 27 16:54:00 crc kubenswrapper[4751]: I0227 16:54:00.478787 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7nzz6\" (UniqueName: \"kubernetes.io/projected/b41c6f8b-2a1f-4b76-8343-4907b52fb82c-kube-api-access-7nzz6\") pod \"auto-csr-approver-29536854-2ml7s\" (UID: \"b41c6f8b-2a1f-4b76-8343-4907b52fb82c\") " pod="openshift-infra/auto-csr-approver-29536854-2ml7s" Feb 27 16:54:00 crc kubenswrapper[4751]: I0227 16:54:00.481977 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536854-2ml7s" Feb 27 16:54:00 crc kubenswrapper[4751]: I0227 16:54:00.917106 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536854-2ml7s"] Feb 27 16:54:01 crc kubenswrapper[4751]: I0227 16:54:01.158907 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536854-2ml7s" event={"ID":"b41c6f8b-2a1f-4b76-8343-4907b52fb82c","Type":"ContainerStarted","Data":"3455520bb73494e3d5066d36f748b781c17a05454145ba10a223987416b494b2"} Feb 27 16:54:03 crc kubenswrapper[4751]: I0227 16:54:03.178184 4751 generic.go:334] "Generic (PLEG): container finished" podID="b41c6f8b-2a1f-4b76-8343-4907b52fb82c" containerID="1efdf364fba1948dedcdc72d9b5c9a673f03eb81f377c55f4e2eb61a2994ef00" exitCode=0 Feb 27 16:54:03 crc kubenswrapper[4751]: I0227 16:54:03.178287 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536854-2ml7s" event={"ID":"b41c6f8b-2a1f-4b76-8343-4907b52fb82c","Type":"ContainerDied","Data":"1efdf364fba1948dedcdc72d9b5c9a673f03eb81f377c55f4e2eb61a2994ef00"} Feb 27 16:54:04 crc kubenswrapper[4751]: I0227 16:54:04.500294 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536854-2ml7s" Feb 27 16:54:04 crc kubenswrapper[4751]: I0227 16:54:04.628348 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7nzz6\" (UniqueName: \"kubernetes.io/projected/b41c6f8b-2a1f-4b76-8343-4907b52fb82c-kube-api-access-7nzz6\") pod \"b41c6f8b-2a1f-4b76-8343-4907b52fb82c\" (UID: \"b41c6f8b-2a1f-4b76-8343-4907b52fb82c\") " Feb 27 16:54:04 crc kubenswrapper[4751]: I0227 16:54:04.636763 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b41c6f8b-2a1f-4b76-8343-4907b52fb82c-kube-api-access-7nzz6" (OuterVolumeSpecName: "kube-api-access-7nzz6") pod "b41c6f8b-2a1f-4b76-8343-4907b52fb82c" (UID: "b41c6f8b-2a1f-4b76-8343-4907b52fb82c"). InnerVolumeSpecName "kube-api-access-7nzz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:54:04 crc kubenswrapper[4751]: I0227 16:54:04.730671 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7nzz6\" (UniqueName: \"kubernetes.io/projected/b41c6f8b-2a1f-4b76-8343-4907b52fb82c-kube-api-access-7nzz6\") on node \"crc\" DevicePath \"\"" Feb 27 16:54:05 crc kubenswrapper[4751]: I0227 16:54:05.195905 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536854-2ml7s" event={"ID":"b41c6f8b-2a1f-4b76-8343-4907b52fb82c","Type":"ContainerDied","Data":"3455520bb73494e3d5066d36f748b781c17a05454145ba10a223987416b494b2"} Feb 27 16:54:05 crc kubenswrapper[4751]: I0227 16:54:05.195943 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3455520bb73494e3d5066d36f748b781c17a05454145ba10a223987416b494b2" Feb 27 16:54:05 crc kubenswrapper[4751]: I0227 16:54:05.196316 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536854-2ml7s" Feb 27 16:54:05 crc kubenswrapper[4751]: I0227 16:54:05.600005 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536848-k5qt8"] Feb 27 16:54:05 crc kubenswrapper[4751]: I0227 16:54:05.610579 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536848-k5qt8"] Feb 27 16:54:06 crc kubenswrapper[4751]: I0227 16:54:06.536681 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9fae513e-32f8-4116-a05a-a4cbc62853ec" path="/var/lib/kubelet/pods/9fae513e-32f8-4116-a05a-a4cbc62853ec/volumes" Feb 27 16:54:07 crc kubenswrapper[4751]: I0227 16:54:07.521323 4751 scope.go:117] "RemoveContainer" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 16:54:07 crc kubenswrapper[4751]: E0227 16:54:07.522104 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:54:21 crc kubenswrapper[4751]: I0227 16:54:21.520612 4751 scope.go:117] "RemoveContainer" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 16:54:21 crc kubenswrapper[4751]: E0227 16:54:21.521713 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:54:32 crc kubenswrapper[4751]: I0227 16:54:32.521362 4751 scope.go:117] "RemoveContainer" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 16:54:32 crc kubenswrapper[4751]: E0227 16:54:32.522374 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:54:47 crc kubenswrapper[4751]: I0227 16:54:47.521037 4751 scope.go:117] "RemoveContainer" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 16:54:47 crc kubenswrapper[4751]: E0227 16:54:47.522232 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:54:47 crc kubenswrapper[4751]: I0227 16:54:47.800952 4751 scope.go:117] "RemoveContainer" containerID="34169689f3c52bf3227ed948d0f7bffbaee547666c297ca3f93c8862f93ae755" Feb 27 
16:54:47 crc kubenswrapper[4751]: I0227 16:54:47.848527 4751 scope.go:117] "RemoveContainer" containerID="3201776196c961d8ac7518f71ad1a5faf58e200bef29f468b4b7c5610c3cb1c5" Feb 27 16:54:47 crc kubenswrapper[4751]: I0227 16:54:47.933032 4751 scope.go:117] "RemoveContainer" containerID="829485fb76c54a23c8e94741447a2ee195eb2dfd182e82c5103b20b0672bbd8a" Feb 27 16:54:47 crc kubenswrapper[4751]: I0227 16:54:47.961085 4751 scope.go:117] "RemoveContainer" containerID="9545fcf071b000f82de0b521c0d81780bb6933081b5ec65ff1aaedbe49c02474" Feb 27 16:54:47 crc kubenswrapper[4751]: I0227 16:54:47.989078 4751 scope.go:117] "RemoveContainer" containerID="9efb72168b1de3e953c265e1a1a6f4153efc64fb95ad6ba03badc8c64cc24224" Feb 27 16:54:48 crc kubenswrapper[4751]: I0227 16:54:48.019329 4751 scope.go:117] "RemoveContainer" containerID="bd8d3711fc5befd9ca00621b8c304a63586b9645182862b07589ddabffd6d563" Feb 27 16:54:48 crc kubenswrapper[4751]: I0227 16:54:48.039669 4751 scope.go:117] "RemoveContainer" containerID="1fd43654ff8fd38e1d4035a1687be06f7000402fb97029086ff1253d7093fa1c" Feb 27 16:54:48 crc kubenswrapper[4751]: I0227 16:54:48.071160 4751 scope.go:117] "RemoveContainer" containerID="ca497ae2e1618601a7349af12c9fe9d6b80038bec3b56034e3639dec42ca9521" Feb 27 16:54:48 crc kubenswrapper[4751]: I0227 16:54:48.092324 4751 scope.go:117] "RemoveContainer" containerID="92998f2cdf2e9c4313aa0a3ab7697b4d34436e6d3fd22cb366615eb354f3f91a" Feb 27 16:55:00 crc kubenswrapper[4751]: I0227 16:55:00.528191 4751 scope.go:117] "RemoveContainer" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 16:55:00 crc kubenswrapper[4751]: E0227 16:55:00.531462 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:55:14 crc kubenswrapper[4751]: I0227 16:55:14.521260 4751 scope.go:117] "RemoveContainer" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 16:55:14 crc kubenswrapper[4751]: E0227 16:55:14.522513 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:55:29 crc kubenswrapper[4751]: I0227 16:55:29.522242 4751 scope.go:117] "RemoveContainer" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 16:55:29 crc kubenswrapper[4751]: E0227 16:55:29.523446 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:55:43 crc kubenswrapper[4751]: I0227 16:55:43.520388 4751 scope.go:117] "RemoveContainer" 
containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 16:55:43 crc kubenswrapper[4751]: E0227 16:55:43.521793 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:55:48 crc kubenswrapper[4751]: I0227 16:55:48.274286 4751 scope.go:117] "RemoveContainer" containerID="a4d187eda36ea4cc354f922213b9dd6fed260eafa6826d3e2f70a01152740544" Feb 27 16:55:48 crc kubenswrapper[4751]: I0227 16:55:48.322587 4751 scope.go:117] "RemoveContainer" containerID="804834f3315040e77bbf995e6e2b32631baaf772804baf713722a0098da1a0df" Feb 27 16:55:48 crc kubenswrapper[4751]: I0227 16:55:48.371949 4751 scope.go:117] "RemoveContainer" containerID="4d65bcc508221ab8e89426f199f38452c9ba3344c8dcef725f5e51974856df60" Feb 27 16:55:55 crc kubenswrapper[4751]: I0227 16:55:55.520925 4751 scope.go:117] "RemoveContainer" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 16:55:55 crc kubenswrapper[4751]: E0227 16:55:55.522086 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:56:00 crc kubenswrapper[4751]: I0227 16:56:00.164257 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536856-db2sg"] Feb 27 16:56:00 crc kubenswrapper[4751]: E0227 16:56:00.165088 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b41c6f8b-2a1f-4b76-8343-4907b52fb82c" containerName="oc" Feb 27 16:56:00 crc kubenswrapper[4751]: I0227 16:56:00.165108 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b41c6f8b-2a1f-4b76-8343-4907b52fb82c" containerName="oc" Feb 27 16:56:00 crc kubenswrapper[4751]: I0227 16:56:00.165304 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="b41c6f8b-2a1f-4b76-8343-4907b52fb82c" containerName="oc" Feb 27 16:56:00 crc kubenswrapper[4751]: I0227 16:56:00.165921 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536856-db2sg" Feb 27 16:56:00 crc kubenswrapper[4751]: I0227 16:56:00.169739 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 16:56:00 crc kubenswrapper[4751]: I0227 16:56:00.169748 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 16:56:00 crc kubenswrapper[4751]: I0227 16:56:00.169848 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 16:56:00 crc kubenswrapper[4751]: I0227 16:56:00.177458 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536856-db2sg"] Feb 27 16:56:00 crc kubenswrapper[4751]: I0227 16:56:00.256818 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9d8p4\" (UniqueName: \"kubernetes.io/projected/ebeadd22-44dc-445d-a739-41d6a85ca738-kube-api-access-9d8p4\") pod \"auto-csr-approver-29536856-db2sg\" (UID: \"ebeadd22-44dc-445d-a739-41d6a85ca738\") " pod="openshift-infra/auto-csr-approver-29536856-db2sg" Feb 27 16:56:00 crc kubenswrapper[4751]: I0227 16:56:00.358243 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9d8p4\" (UniqueName: \"kubernetes.io/projected/ebeadd22-44dc-445d-a739-41d6a85ca738-kube-api-access-9d8p4\") pod \"auto-csr-approver-29536856-db2sg\" (UID: \"ebeadd22-44dc-445d-a739-41d6a85ca738\") " pod="openshift-infra/auto-csr-approver-29536856-db2sg" Feb 27 16:56:00 crc kubenswrapper[4751]: I0227 16:56:00.380699 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9d8p4\" (UniqueName: \"kubernetes.io/projected/ebeadd22-44dc-445d-a739-41d6a85ca738-kube-api-access-9d8p4\") pod \"auto-csr-approver-29536856-db2sg\" (UID: \"ebeadd22-44dc-445d-a739-41d6a85ca738\") " pod="openshift-infra/auto-csr-approver-29536856-db2sg" Feb 27 16:56:00 crc kubenswrapper[4751]: I0227 16:56:00.494306 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536856-db2sg" Feb 27 16:56:01 crc kubenswrapper[4751]: I0227 16:56:01.045878 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536856-db2sg"] Feb 27 16:56:01 crc kubenswrapper[4751]: I0227 16:56:01.054982 4751 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 27 16:56:01 crc kubenswrapper[4751]: I0227 16:56:01.332371 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536856-db2sg" event={"ID":"ebeadd22-44dc-445d-a739-41d6a85ca738","Type":"ContainerStarted","Data":"e161dcd9e6b70f355165d988894f1c04c252cc96b483ab639ab97c3a1ba41717"} Feb 27 16:56:02 crc kubenswrapper[4751]: I0227 16:56:02.346659 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536856-db2sg" event={"ID":"ebeadd22-44dc-445d-a739-41d6a85ca738","Type":"ContainerStarted","Data":"cecf25500477f87bf157cbdb2c8187cccaf59bab938caca58b44dfb2d58e4b56"} Feb 27 16:56:02 crc kubenswrapper[4751]: I0227 16:56:02.378778 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-infra/auto-csr-approver-29536856-db2sg" podStartSLOduration=1.43645787 podStartE2EDuration="2.378752413s" podCreationTimestamp="2026-02-27 16:56:00 +0000 UTC" firstStartedPulling="2026-02-27 16:56:01.054562175 +0000 UTC m=+1923.201576662" lastFinishedPulling="2026-02-27 16:56:01.996856718 +0000 UTC m=+1924.143871205" observedRunningTime="2026-02-27 16:56:02.364065622 +0000 UTC m=+1924.511080109" watchObservedRunningTime="2026-02-27 16:56:02.378752413 +0000 UTC m=+1924.525766890" Feb 27 16:56:03 crc kubenswrapper[4751]: I0227 16:56:03.358674 4751 generic.go:334] "Generic (PLEG): container finished" podID="ebeadd22-44dc-445d-a739-41d6a85ca738" containerID="cecf25500477f87bf157cbdb2c8187cccaf59bab938caca58b44dfb2d58e4b56" exitCode=0 Feb 27 16:56:03 crc kubenswrapper[4751]: I0227 16:56:03.358725 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536856-db2sg" event={"ID":"ebeadd22-44dc-445d-a739-41d6a85ca738","Type":"ContainerDied","Data":"cecf25500477f87bf157cbdb2c8187cccaf59bab938caca58b44dfb2d58e4b56"} Feb 27 16:56:04 crc kubenswrapper[4751]: I0227 16:56:04.673885 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536856-db2sg" Feb 27 16:56:04 crc kubenswrapper[4751]: I0227 16:56:04.835970 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9d8p4\" (UniqueName: \"kubernetes.io/projected/ebeadd22-44dc-445d-a739-41d6a85ca738-kube-api-access-9d8p4\") pod \"ebeadd22-44dc-445d-a739-41d6a85ca738\" (UID: \"ebeadd22-44dc-445d-a739-41d6a85ca738\") " Feb 27 16:56:04 crc kubenswrapper[4751]: I0227 16:56:04.844676 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebeadd22-44dc-445d-a739-41d6a85ca738-kube-api-access-9d8p4" (OuterVolumeSpecName: "kube-api-access-9d8p4") pod "ebeadd22-44dc-445d-a739-41d6a85ca738" (UID: "ebeadd22-44dc-445d-a739-41d6a85ca738"). InnerVolumeSpecName "kube-api-access-9d8p4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:56:04 crc kubenswrapper[4751]: I0227 16:56:04.937824 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9d8p4\" (UniqueName: \"kubernetes.io/projected/ebeadd22-44dc-445d-a739-41d6a85ca738-kube-api-access-9d8p4\") on node \"crc\" DevicePath \"\"" Feb 27 16:56:05 crc kubenswrapper[4751]: I0227 16:56:05.377376 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536856-db2sg" event={"ID":"ebeadd22-44dc-445d-a739-41d6a85ca738","Type":"ContainerDied","Data":"e161dcd9e6b70f355165d988894f1c04c252cc96b483ab639ab97c3a1ba41717"} Feb 27 16:56:05 crc kubenswrapper[4751]: I0227 16:56:05.377478 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e161dcd9e6b70f355165d988894f1c04c252cc96b483ab639ab97c3a1ba41717" Feb 27 16:56:05 crc kubenswrapper[4751]: I0227 16:56:05.377486 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536856-db2sg" Feb 27 16:56:05 crc kubenswrapper[4751]: I0227 16:56:05.453227 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536850-n6f85"] Feb 27 16:56:05 crc kubenswrapper[4751]: I0227 16:56:05.460027 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536850-n6f85"] Feb 27 16:56:06 crc kubenswrapper[4751]: I0227 16:56:06.533280 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a542e28d-0700-487f-be1c-2d2c982a03f5" path="/var/lib/kubelet/pods/a542e28d-0700-487f-be1c-2d2c982a03f5/volumes" Feb 27 16:56:08 crc kubenswrapper[4751]: I0227 16:56:08.532570 4751 scope.go:117] "RemoveContainer" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 16:56:08 crc kubenswrapper[4751]: E0227 16:56:08.533029 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:56:21 crc kubenswrapper[4751]: I0227 16:56:21.520537 4751 scope.go:117] "RemoveContainer" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 16:56:21 crc kubenswrapper[4751]: E0227 16:56:21.521499 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:56:36 crc kubenswrapper[4751]: I0227 16:56:36.520934 4751 scope.go:117] "RemoveContainer" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 16:56:36 crc kubenswrapper[4751]: E0227 16:56:36.522085 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:56:47 crc kubenswrapper[4751]: I0227 16:56:47.360124 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mflxp"] Feb 27 16:56:47 crc kubenswrapper[4751]: E0227 16:56:47.361293 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebeadd22-44dc-445d-a739-41d6a85ca738" containerName="oc" Feb 27 16:56:47 crc kubenswrapper[4751]: I0227 16:56:47.361318 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebeadd22-44dc-445d-a739-41d6a85ca738" containerName="oc" Feb 27 16:56:47 crc kubenswrapper[4751]: I0227 16:56:47.361651 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebeadd22-44dc-445d-a739-41d6a85ca738" containerName="oc" Feb 27 16:56:47 crc kubenswrapper[4751]: I0227 16:56:47.363355 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mflxp" Feb 27 16:56:47 crc kubenswrapper[4751]: I0227 16:56:47.383881 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mflxp"] Feb 27 16:56:47 crc kubenswrapper[4751]: I0227 16:56:47.506142 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2hzb\" (UniqueName: \"kubernetes.io/projected/7cbcbeaa-ede2-4e97-9a6f-1db15064af7c-kube-api-access-t2hzb\") pod \"redhat-operators-mflxp\" (UID: \"7cbcbeaa-ede2-4e97-9a6f-1db15064af7c\") " pod="openshift-marketplace/redhat-operators-mflxp" Feb 27 16:56:47 crc kubenswrapper[4751]: I0227 16:56:47.506213 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7cbcbeaa-ede2-4e97-9a6f-1db15064af7c-utilities\") pod \"redhat-operators-mflxp\" (UID: \"7cbcbeaa-ede2-4e97-9a6f-1db15064af7c\") " pod="openshift-marketplace/redhat-operators-mflxp" Feb 27 16:56:47 crc kubenswrapper[4751]: I0227 16:56:47.506278 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7cbcbeaa-ede2-4e97-9a6f-1db15064af7c-catalog-content\") pod \"redhat-operators-mflxp\" (UID: \"7cbcbeaa-ede2-4e97-9a6f-1db15064af7c\") " pod="openshift-marketplace/redhat-operators-mflxp" Feb 27 16:56:47 crc kubenswrapper[4751]: I0227 16:56:47.607808 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2hzb\" (UniqueName: \"kubernetes.io/projected/7cbcbeaa-ede2-4e97-9a6f-1db15064af7c-kube-api-access-t2hzb\") pod \"redhat-operators-mflxp\" (UID: \"7cbcbeaa-ede2-4e97-9a6f-1db15064af7c\") " pod="openshift-marketplace/redhat-operators-mflxp" Feb 27 16:56:47 crc kubenswrapper[4751]: I0227 16:56:47.607875 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7cbcbeaa-ede2-4e97-9a6f-1db15064af7c-utilities\") pod \"redhat-operators-mflxp\" (UID: \"7cbcbeaa-ede2-4e97-9a6f-1db15064af7c\") " pod="openshift-marketplace/redhat-operators-mflxp" Feb 27 16:56:47 crc kubenswrapper[4751]: I0227 16:56:47.607917 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/7cbcbeaa-ede2-4e97-9a6f-1db15064af7c-catalog-content\") pod \"redhat-operators-mflxp\" (UID: \"7cbcbeaa-ede2-4e97-9a6f-1db15064af7c\") " pod="openshift-marketplace/redhat-operators-mflxp" Feb 27 16:56:47 crc kubenswrapper[4751]: I0227 16:56:47.608433 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7cbcbeaa-ede2-4e97-9a6f-1db15064af7c-utilities\") pod \"redhat-operators-mflxp\" (UID: \"7cbcbeaa-ede2-4e97-9a6f-1db15064af7c\") " pod="openshift-marketplace/redhat-operators-mflxp" Feb 27 16:56:47 crc kubenswrapper[4751]: I0227 16:56:47.608475 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7cbcbeaa-ede2-4e97-9a6f-1db15064af7c-catalog-content\") pod \"redhat-operators-mflxp\" (UID: \"7cbcbeaa-ede2-4e97-9a6f-1db15064af7c\") " pod="openshift-marketplace/redhat-operators-mflxp" Feb 27 16:56:47 crc kubenswrapper[4751]: I0227 16:56:47.636465 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2hzb\" (UniqueName: \"kubernetes.io/projected/7cbcbeaa-ede2-4e97-9a6f-1db15064af7c-kube-api-access-t2hzb\") pod \"redhat-operators-mflxp\" (UID: \"7cbcbeaa-ede2-4e97-9a6f-1db15064af7c\") " pod="openshift-marketplace/redhat-operators-mflxp" Feb 27 16:56:47 crc kubenswrapper[4751]: I0227 16:56:47.710877 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mflxp" Feb 27 16:56:48 crc kubenswrapper[4751]: I0227 16:56:48.148176 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mflxp"] Feb 27 16:56:48 crc kubenswrapper[4751]: I0227 16:56:48.469526 4751 scope.go:117] "RemoveContainer" containerID="8ce409aa780846399cf6e0d34303969ec916d50693012fdf4ddd01e09ef96144" Feb 27 16:56:48 crc kubenswrapper[4751]: I0227 16:56:48.528146 4751 scope.go:117] "RemoveContainer" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 16:56:48 crc kubenswrapper[4751]: E0227 16:56:48.528372 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:56:48 crc kubenswrapper[4751]: I0227 16:56:48.834177 4751 generic.go:334] "Generic (PLEG): container finished" podID="7cbcbeaa-ede2-4e97-9a6f-1db15064af7c" containerID="19d0d2c852a446b8ba8b7057647f7e7328ca26c3d7a721f46469364db458b746" exitCode=0 Feb 27 16:56:48 crc kubenswrapper[4751]: I0227 16:56:48.834228 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mflxp" event={"ID":"7cbcbeaa-ede2-4e97-9a6f-1db15064af7c","Type":"ContainerDied","Data":"19d0d2c852a446b8ba8b7057647f7e7328ca26c3d7a721f46469364db458b746"} Feb 27 16:56:48 crc kubenswrapper[4751]: I0227 16:56:48.834261 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mflxp" event={"ID":"7cbcbeaa-ede2-4e97-9a6f-1db15064af7c","Type":"ContainerStarted","Data":"29153737e9684f751eecd0c7fc9cfe303ae001f7da8fbfd0af06e1be064edf1e"} Feb 27 16:56:49 crc kubenswrapper[4751]: I0227 16:56:49.589231 4751 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-kb49v"] Feb 27 16:56:49 crc kubenswrapper[4751]: I0227 16:56:49.628818 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kb49v"] Feb 27 16:56:49 crc kubenswrapper[4751]: I0227 16:56:49.629273 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kb49v" Feb 27 16:56:49 crc kubenswrapper[4751]: I0227 16:56:49.746927 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/979fc3a9-29bb-4dee-9393-d16007946233-catalog-content\") pod \"community-operators-kb49v\" (UID: \"979fc3a9-29bb-4dee-9393-d16007946233\") " pod="openshift-marketplace/community-operators-kb49v" Feb 27 16:56:49 crc kubenswrapper[4751]: I0227 16:56:49.746985 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/979fc3a9-29bb-4dee-9393-d16007946233-utilities\") pod \"community-operators-kb49v\" (UID: \"979fc3a9-29bb-4dee-9393-d16007946233\") " pod="openshift-marketplace/community-operators-kb49v" Feb 27 16:56:49 crc kubenswrapper[4751]: I0227 16:56:49.747053 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6ftj5\" (UniqueName: \"kubernetes.io/projected/979fc3a9-29bb-4dee-9393-d16007946233-kube-api-access-6ftj5\") pod \"community-operators-kb49v\" (UID: \"979fc3a9-29bb-4dee-9393-d16007946233\") " pod="openshift-marketplace/community-operators-kb49v" Feb 27 16:56:49 crc kubenswrapper[4751]: I0227 16:56:49.842466 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mflxp" event={"ID":"7cbcbeaa-ede2-4e97-9a6f-1db15064af7c","Type":"ContainerStarted","Data":"49f110af7ffc3a3eed6e27bf2968a8f7e5486e9e66ba6e747464c41f78ac45fb"} Feb 27 16:56:49 crc kubenswrapper[4751]: I0227 16:56:49.848286 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/979fc3a9-29bb-4dee-9393-d16007946233-catalog-content\") pod \"community-operators-kb49v\" (UID: \"979fc3a9-29bb-4dee-9393-d16007946233\") " pod="openshift-marketplace/community-operators-kb49v" Feb 27 16:56:49 crc kubenswrapper[4751]: I0227 16:56:49.848349 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/979fc3a9-29bb-4dee-9393-d16007946233-utilities\") pod \"community-operators-kb49v\" (UID: \"979fc3a9-29bb-4dee-9393-d16007946233\") " pod="openshift-marketplace/community-operators-kb49v" Feb 27 16:56:49 crc kubenswrapper[4751]: I0227 16:56:49.848370 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6ftj5\" (UniqueName: \"kubernetes.io/projected/979fc3a9-29bb-4dee-9393-d16007946233-kube-api-access-6ftj5\") pod \"community-operators-kb49v\" (UID: \"979fc3a9-29bb-4dee-9393-d16007946233\") " pod="openshift-marketplace/community-operators-kb49v" Feb 27 16:56:49 crc kubenswrapper[4751]: I0227 16:56:49.849017 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/979fc3a9-29bb-4dee-9393-d16007946233-catalog-content\") pod \"community-operators-kb49v\" (UID: \"979fc3a9-29bb-4dee-9393-d16007946233\") " 
pod="openshift-marketplace/community-operators-kb49v" Feb 27 16:56:49 crc kubenswrapper[4751]: I0227 16:56:49.849229 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/979fc3a9-29bb-4dee-9393-d16007946233-utilities\") pod \"community-operators-kb49v\" (UID: \"979fc3a9-29bb-4dee-9393-d16007946233\") " pod="openshift-marketplace/community-operators-kb49v" Feb 27 16:56:49 crc kubenswrapper[4751]: I0227 16:56:49.874738 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6ftj5\" (UniqueName: \"kubernetes.io/projected/979fc3a9-29bb-4dee-9393-d16007946233-kube-api-access-6ftj5\") pod \"community-operators-kb49v\" (UID: \"979fc3a9-29bb-4dee-9393-d16007946233\") " pod="openshift-marketplace/community-operators-kb49v" Feb 27 16:56:50 crc kubenswrapper[4751]: I0227 16:56:50.012669 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kb49v" Feb 27 16:56:50 crc kubenswrapper[4751]: I0227 16:56:50.514012 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kb49v"] Feb 27 16:56:50 crc kubenswrapper[4751]: W0227 16:56:50.524672 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod979fc3a9_29bb_4dee_9393_d16007946233.slice/crio-2969cc02298adb983835d74f18c6c9efbef2d24fb2992c17c67f6275431e811e WatchSource:0}: Error finding container 2969cc02298adb983835d74f18c6c9efbef2d24fb2992c17c67f6275431e811e: Status 404 returned error can't find the container with id 2969cc02298adb983835d74f18c6c9efbef2d24fb2992c17c67f6275431e811e Feb 27 16:56:50 crc kubenswrapper[4751]: I0227 16:56:50.858801 4751 generic.go:334] "Generic (PLEG): container finished" podID="979fc3a9-29bb-4dee-9393-d16007946233" containerID="890fd52aede17a2b6e74ffc3ce1f85dbc7731131c448797b8291546c352245a5" exitCode=0 Feb 27 16:56:50 crc kubenswrapper[4751]: I0227 16:56:50.858873 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kb49v" event={"ID":"979fc3a9-29bb-4dee-9393-d16007946233","Type":"ContainerDied","Data":"890fd52aede17a2b6e74ffc3ce1f85dbc7731131c448797b8291546c352245a5"} Feb 27 16:56:50 crc kubenswrapper[4751]: I0227 16:56:50.858946 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kb49v" event={"ID":"979fc3a9-29bb-4dee-9393-d16007946233","Type":"ContainerStarted","Data":"2969cc02298adb983835d74f18c6c9efbef2d24fb2992c17c67f6275431e811e"} Feb 27 16:56:50 crc kubenswrapper[4751]: I0227 16:56:50.865753 4751 generic.go:334] "Generic (PLEG): container finished" podID="7cbcbeaa-ede2-4e97-9a6f-1db15064af7c" containerID="49f110af7ffc3a3eed6e27bf2968a8f7e5486e9e66ba6e747464c41f78ac45fb" exitCode=0 Feb 27 16:56:50 crc kubenswrapper[4751]: I0227 16:56:50.865790 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mflxp" event={"ID":"7cbcbeaa-ede2-4e97-9a6f-1db15064af7c","Type":"ContainerDied","Data":"49f110af7ffc3a3eed6e27bf2968a8f7e5486e9e66ba6e747464c41f78ac45fb"} Feb 27 16:56:51 crc kubenswrapper[4751]: I0227 16:56:51.874889 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kb49v" event={"ID":"979fc3a9-29bb-4dee-9393-d16007946233","Type":"ContainerStarted","Data":"be15b242bfd92042b555c26f76d6860a1ddc50838700e9558ff7c4faa0a59e44"} Feb 27 16:56:51 
crc kubenswrapper[4751]: I0227 16:56:51.879786 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mflxp" event={"ID":"7cbcbeaa-ede2-4e97-9a6f-1db15064af7c","Type":"ContainerStarted","Data":"d277115def547410e1c0d6f98c56e4de6bf8541cc051355b9aa19107e1f29169"} Feb 27 16:56:51 crc kubenswrapper[4751]: I0227 16:56:51.927693 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mflxp" podStartSLOduration=2.504672894 podStartE2EDuration="4.92766549s" podCreationTimestamp="2026-02-27 16:56:47 +0000 UTC" firstStartedPulling="2026-02-27 16:56:48.835847191 +0000 UTC m=+1970.982861658" lastFinishedPulling="2026-02-27 16:56:51.258839767 +0000 UTC m=+1973.405854254" observedRunningTime="2026-02-27 16:56:51.920271684 +0000 UTC m=+1974.067286141" watchObservedRunningTime="2026-02-27 16:56:51.92766549 +0000 UTC m=+1974.074679947" Feb 27 16:56:52 crc kubenswrapper[4751]: I0227 16:56:52.890134 4751 generic.go:334] "Generic (PLEG): container finished" podID="979fc3a9-29bb-4dee-9393-d16007946233" containerID="be15b242bfd92042b555c26f76d6860a1ddc50838700e9558ff7c4faa0a59e44" exitCode=0 Feb 27 16:56:52 crc kubenswrapper[4751]: I0227 16:56:52.890256 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kb49v" event={"ID":"979fc3a9-29bb-4dee-9393-d16007946233","Type":"ContainerDied","Data":"be15b242bfd92042b555c26f76d6860a1ddc50838700e9558ff7c4faa0a59e44"} Feb 27 16:56:53 crc kubenswrapper[4751]: I0227 16:56:53.899663 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kb49v" event={"ID":"979fc3a9-29bb-4dee-9393-d16007946233","Type":"ContainerStarted","Data":"2faac6ed2dfbb13091bf5b790dfd5c56f097fb29b64cd3479b444ce1e0d2281d"} Feb 27 16:56:53 crc kubenswrapper[4751]: I0227 16:56:53.925698 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-kb49v" podStartSLOduration=2.488895581 podStartE2EDuration="4.925676754s" podCreationTimestamp="2026-02-27 16:56:49 +0000 UTC" firstStartedPulling="2026-02-27 16:56:50.862744584 +0000 UTC m=+1973.009759041" lastFinishedPulling="2026-02-27 16:56:53.299525727 +0000 UTC m=+1975.446540214" observedRunningTime="2026-02-27 16:56:53.922647723 +0000 UTC m=+1976.069662170" watchObservedRunningTime="2026-02-27 16:56:53.925676754 +0000 UTC m=+1976.072691201" Feb 27 16:56:57 crc kubenswrapper[4751]: I0227 16:56:57.711593 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mflxp" Feb 27 16:56:57 crc kubenswrapper[4751]: I0227 16:56:57.711984 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mflxp" Feb 27 16:56:58 crc kubenswrapper[4751]: I0227 16:56:58.835236 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mflxp" podUID="7cbcbeaa-ede2-4e97-9a6f-1db15064af7c" containerName="registry-server" probeResult="failure" output=< Feb 27 16:56:58 crc kubenswrapper[4751]: timeout: failed to connect service ":50051" within 1s Feb 27 16:56:58 crc kubenswrapper[4751]: > Feb 27 16:56:59 crc kubenswrapper[4751]: I0227 16:56:59.520892 4751 scope.go:117] "RemoveContainer" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 16:56:59 crc kubenswrapper[4751]: E0227 16:56:59.521273 4751 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:57:00 crc kubenswrapper[4751]: I0227 16:57:00.012934 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-kb49v" Feb 27 16:57:00 crc kubenswrapper[4751]: I0227 16:57:00.013795 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-kb49v" Feb 27 16:57:00 crc kubenswrapper[4751]: I0227 16:57:00.075693 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-kb49v" Feb 27 16:57:01 crc kubenswrapper[4751]: I0227 16:57:01.024238 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-kb49v" Feb 27 16:57:01 crc kubenswrapper[4751]: I0227 16:57:01.100726 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kb49v"] Feb 27 16:57:02 crc kubenswrapper[4751]: I0227 16:57:02.977880 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-kb49v" podUID="979fc3a9-29bb-4dee-9393-d16007946233" containerName="registry-server" containerID="cri-o://2faac6ed2dfbb13091bf5b790dfd5c56f097fb29b64cd3479b444ce1e0d2281d" gracePeriod=2 Feb 27 16:57:03 crc kubenswrapper[4751]: I0227 16:57:03.497639 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kb49v" Feb 27 16:57:03 crc kubenswrapper[4751]: I0227 16:57:03.676306 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/979fc3a9-29bb-4dee-9393-d16007946233-utilities\") pod \"979fc3a9-29bb-4dee-9393-d16007946233\" (UID: \"979fc3a9-29bb-4dee-9393-d16007946233\") " Feb 27 16:57:03 crc kubenswrapper[4751]: I0227 16:57:03.676576 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ftj5\" (UniqueName: \"kubernetes.io/projected/979fc3a9-29bb-4dee-9393-d16007946233-kube-api-access-6ftj5\") pod \"979fc3a9-29bb-4dee-9393-d16007946233\" (UID: \"979fc3a9-29bb-4dee-9393-d16007946233\") " Feb 27 16:57:03 crc kubenswrapper[4751]: I0227 16:57:03.676656 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/979fc3a9-29bb-4dee-9393-d16007946233-catalog-content\") pod \"979fc3a9-29bb-4dee-9393-d16007946233\" (UID: \"979fc3a9-29bb-4dee-9393-d16007946233\") " Feb 27 16:57:03 crc kubenswrapper[4751]: I0227 16:57:03.679797 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/979fc3a9-29bb-4dee-9393-d16007946233-utilities" (OuterVolumeSpecName: "utilities") pod "979fc3a9-29bb-4dee-9393-d16007946233" (UID: "979fc3a9-29bb-4dee-9393-d16007946233"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:57:03 crc kubenswrapper[4751]: I0227 16:57:03.680608 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/979fc3a9-29bb-4dee-9393-d16007946233-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 16:57:03 crc kubenswrapper[4751]: I0227 16:57:03.701707 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/979fc3a9-29bb-4dee-9393-d16007946233-kube-api-access-6ftj5" (OuterVolumeSpecName: "kube-api-access-6ftj5") pod "979fc3a9-29bb-4dee-9393-d16007946233" (UID: "979fc3a9-29bb-4dee-9393-d16007946233"). InnerVolumeSpecName "kube-api-access-6ftj5". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:57:03 crc kubenswrapper[4751]: I0227 16:57:03.755242 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/979fc3a9-29bb-4dee-9393-d16007946233-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "979fc3a9-29bb-4dee-9393-d16007946233" (UID: "979fc3a9-29bb-4dee-9393-d16007946233"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:57:03 crc kubenswrapper[4751]: I0227 16:57:03.782249 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/979fc3a9-29bb-4dee-9393-d16007946233-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 16:57:03 crc kubenswrapper[4751]: I0227 16:57:03.782285 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ftj5\" (UniqueName: \"kubernetes.io/projected/979fc3a9-29bb-4dee-9393-d16007946233-kube-api-access-6ftj5\") on node \"crc\" DevicePath \"\"" Feb 27 16:57:04 crc kubenswrapper[4751]: I0227 16:57:04.008678 4751 generic.go:334] "Generic (PLEG): container finished" podID="979fc3a9-29bb-4dee-9393-d16007946233" containerID="2faac6ed2dfbb13091bf5b790dfd5c56f097fb29b64cd3479b444ce1e0d2281d" exitCode=0 Feb 27 16:57:04 crc kubenswrapper[4751]: I0227 16:57:04.008745 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kb49v" event={"ID":"979fc3a9-29bb-4dee-9393-d16007946233","Type":"ContainerDied","Data":"2faac6ed2dfbb13091bf5b790dfd5c56f097fb29b64cd3479b444ce1e0d2281d"} Feb 27 16:57:04 crc kubenswrapper[4751]: I0227 16:57:04.008794 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kb49v" event={"ID":"979fc3a9-29bb-4dee-9393-d16007946233","Type":"ContainerDied","Data":"2969cc02298adb983835d74f18c6c9efbef2d24fb2992c17c67f6275431e811e"} Feb 27 16:57:04 crc kubenswrapper[4751]: I0227 16:57:04.008863 4751 scope.go:117] "RemoveContainer" containerID="2faac6ed2dfbb13091bf5b790dfd5c56f097fb29b64cd3479b444ce1e0d2281d" Feb 27 16:57:04 crc kubenswrapper[4751]: I0227 16:57:04.008786 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-kb49v" Feb 27 16:57:04 crc kubenswrapper[4751]: I0227 16:57:04.049731 4751 scope.go:117] "RemoveContainer" containerID="be15b242bfd92042b555c26f76d6860a1ddc50838700e9558ff7c4faa0a59e44" Feb 27 16:57:04 crc kubenswrapper[4751]: I0227 16:57:04.072292 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kb49v"] Feb 27 16:57:04 crc kubenswrapper[4751]: I0227 16:57:04.087458 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-kb49v"] Feb 27 16:57:04 crc kubenswrapper[4751]: I0227 16:57:04.098549 4751 scope.go:117] "RemoveContainer" containerID="890fd52aede17a2b6e74ffc3ce1f85dbc7731131c448797b8291546c352245a5" Feb 27 16:57:04 crc kubenswrapper[4751]: I0227 16:57:04.132557 4751 scope.go:117] "RemoveContainer" containerID="2faac6ed2dfbb13091bf5b790dfd5c56f097fb29b64cd3479b444ce1e0d2281d" Feb 27 16:57:04 crc kubenswrapper[4751]: E0227 16:57:04.133263 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2faac6ed2dfbb13091bf5b790dfd5c56f097fb29b64cd3479b444ce1e0d2281d\": container with ID starting with 2faac6ed2dfbb13091bf5b790dfd5c56f097fb29b64cd3479b444ce1e0d2281d not found: ID does not exist" containerID="2faac6ed2dfbb13091bf5b790dfd5c56f097fb29b64cd3479b444ce1e0d2281d" Feb 27 16:57:04 crc kubenswrapper[4751]: I0227 16:57:04.133303 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2faac6ed2dfbb13091bf5b790dfd5c56f097fb29b64cd3479b444ce1e0d2281d"} err="failed to get container status \"2faac6ed2dfbb13091bf5b790dfd5c56f097fb29b64cd3479b444ce1e0d2281d\": rpc error: code = NotFound desc = could not find container \"2faac6ed2dfbb13091bf5b790dfd5c56f097fb29b64cd3479b444ce1e0d2281d\": container with ID starting with 2faac6ed2dfbb13091bf5b790dfd5c56f097fb29b64cd3479b444ce1e0d2281d not found: ID does not exist" Feb 27 16:57:04 crc kubenswrapper[4751]: I0227 16:57:04.133331 4751 scope.go:117] "RemoveContainer" containerID="be15b242bfd92042b555c26f76d6860a1ddc50838700e9558ff7c4faa0a59e44" Feb 27 16:57:04 crc kubenswrapper[4751]: E0227 16:57:04.134555 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be15b242bfd92042b555c26f76d6860a1ddc50838700e9558ff7c4faa0a59e44\": container with ID starting with be15b242bfd92042b555c26f76d6860a1ddc50838700e9558ff7c4faa0a59e44 not found: ID does not exist" containerID="be15b242bfd92042b555c26f76d6860a1ddc50838700e9558ff7c4faa0a59e44" Feb 27 16:57:04 crc kubenswrapper[4751]: I0227 16:57:04.135501 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be15b242bfd92042b555c26f76d6860a1ddc50838700e9558ff7c4faa0a59e44"} err="failed to get container status \"be15b242bfd92042b555c26f76d6860a1ddc50838700e9558ff7c4faa0a59e44\": rpc error: code = NotFound desc = could not find container \"be15b242bfd92042b555c26f76d6860a1ddc50838700e9558ff7c4faa0a59e44\": container with ID starting with be15b242bfd92042b555c26f76d6860a1ddc50838700e9558ff7c4faa0a59e44 not found: ID does not exist" Feb 27 16:57:04 crc kubenswrapper[4751]: I0227 16:57:04.135706 4751 scope.go:117] "RemoveContainer" containerID="890fd52aede17a2b6e74ffc3ce1f85dbc7731131c448797b8291546c352245a5" Feb 27 16:57:04 crc kubenswrapper[4751]: E0227 16:57:04.136326 4751 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"890fd52aede17a2b6e74ffc3ce1f85dbc7731131c448797b8291546c352245a5\": container with ID starting with 890fd52aede17a2b6e74ffc3ce1f85dbc7731131c448797b8291546c352245a5 not found: ID does not exist" containerID="890fd52aede17a2b6e74ffc3ce1f85dbc7731131c448797b8291546c352245a5" Feb 27 16:57:04 crc kubenswrapper[4751]: I0227 16:57:04.136388 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"890fd52aede17a2b6e74ffc3ce1f85dbc7731131c448797b8291546c352245a5"} err="failed to get container status \"890fd52aede17a2b6e74ffc3ce1f85dbc7731131c448797b8291546c352245a5\": rpc error: code = NotFound desc = could not find container \"890fd52aede17a2b6e74ffc3ce1f85dbc7731131c448797b8291546c352245a5\": container with ID starting with 890fd52aede17a2b6e74ffc3ce1f85dbc7731131c448797b8291546c352245a5 not found: ID does not exist" Feb 27 16:57:04 crc kubenswrapper[4751]: I0227 16:57:04.534326 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="979fc3a9-29bb-4dee-9393-d16007946233" path="/var/lib/kubelet/pods/979fc3a9-29bb-4dee-9393-d16007946233/volumes" Feb 27 16:57:07 crc kubenswrapper[4751]: I0227 16:57:07.790310 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mflxp" Feb 27 16:57:07 crc kubenswrapper[4751]: I0227 16:57:07.869036 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mflxp" Feb 27 16:57:08 crc kubenswrapper[4751]: I0227 16:57:08.043566 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mflxp"] Feb 27 16:57:09 crc kubenswrapper[4751]: I0227 16:57:09.058291 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-mflxp" podUID="7cbcbeaa-ede2-4e97-9a6f-1db15064af7c" containerName="registry-server" containerID="cri-o://d277115def547410e1c0d6f98c56e4de6bf8541cc051355b9aa19107e1f29169" gracePeriod=2 Feb 27 16:57:09 crc kubenswrapper[4751]: I0227 16:57:09.527765 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mflxp" Feb 27 16:57:09 crc kubenswrapper[4751]: I0227 16:57:09.691524 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7cbcbeaa-ede2-4e97-9a6f-1db15064af7c-catalog-content\") pod \"7cbcbeaa-ede2-4e97-9a6f-1db15064af7c\" (UID: \"7cbcbeaa-ede2-4e97-9a6f-1db15064af7c\") " Feb 27 16:57:09 crc kubenswrapper[4751]: I0227 16:57:09.691653 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2hzb\" (UniqueName: \"kubernetes.io/projected/7cbcbeaa-ede2-4e97-9a6f-1db15064af7c-kube-api-access-t2hzb\") pod \"7cbcbeaa-ede2-4e97-9a6f-1db15064af7c\" (UID: \"7cbcbeaa-ede2-4e97-9a6f-1db15064af7c\") " Feb 27 16:57:09 crc kubenswrapper[4751]: I0227 16:57:09.691704 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7cbcbeaa-ede2-4e97-9a6f-1db15064af7c-utilities\") pod \"7cbcbeaa-ede2-4e97-9a6f-1db15064af7c\" (UID: \"7cbcbeaa-ede2-4e97-9a6f-1db15064af7c\") " Feb 27 16:57:09 crc kubenswrapper[4751]: I0227 16:57:09.693325 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7cbcbeaa-ede2-4e97-9a6f-1db15064af7c-utilities" (OuterVolumeSpecName: "utilities") pod "7cbcbeaa-ede2-4e97-9a6f-1db15064af7c" (UID: "7cbcbeaa-ede2-4e97-9a6f-1db15064af7c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:57:09 crc kubenswrapper[4751]: I0227 16:57:09.695348 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7cbcbeaa-ede2-4e97-9a6f-1db15064af7c-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 16:57:09 crc kubenswrapper[4751]: I0227 16:57:09.697643 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7cbcbeaa-ede2-4e97-9a6f-1db15064af7c-kube-api-access-t2hzb" (OuterVolumeSpecName: "kube-api-access-t2hzb") pod "7cbcbeaa-ede2-4e97-9a6f-1db15064af7c" (UID: "7cbcbeaa-ede2-4e97-9a6f-1db15064af7c"). InnerVolumeSpecName "kube-api-access-t2hzb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:57:09 crc kubenswrapper[4751]: I0227 16:57:09.797429 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2hzb\" (UniqueName: \"kubernetes.io/projected/7cbcbeaa-ede2-4e97-9a6f-1db15064af7c-kube-api-access-t2hzb\") on node \"crc\" DevicePath \"\"" Feb 27 16:57:09 crc kubenswrapper[4751]: I0227 16:57:09.831908 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7cbcbeaa-ede2-4e97-9a6f-1db15064af7c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7cbcbeaa-ede2-4e97-9a6f-1db15064af7c" (UID: "7cbcbeaa-ede2-4e97-9a6f-1db15064af7c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 16:57:09 crc kubenswrapper[4751]: I0227 16:57:09.899545 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7cbcbeaa-ede2-4e97-9a6f-1db15064af7c-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 16:57:10 crc kubenswrapper[4751]: I0227 16:57:10.071503 4751 generic.go:334] "Generic (PLEG): container finished" podID="7cbcbeaa-ede2-4e97-9a6f-1db15064af7c" containerID="d277115def547410e1c0d6f98c56e4de6bf8541cc051355b9aa19107e1f29169" exitCode=0 Feb 27 16:57:10 crc kubenswrapper[4751]: I0227 16:57:10.071560 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mflxp" event={"ID":"7cbcbeaa-ede2-4e97-9a6f-1db15064af7c","Type":"ContainerDied","Data":"d277115def547410e1c0d6f98c56e4de6bf8541cc051355b9aa19107e1f29169"} Feb 27 16:57:10 crc kubenswrapper[4751]: I0227 16:57:10.071568 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mflxp" Feb 27 16:57:10 crc kubenswrapper[4751]: I0227 16:57:10.071590 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mflxp" event={"ID":"7cbcbeaa-ede2-4e97-9a6f-1db15064af7c","Type":"ContainerDied","Data":"29153737e9684f751eecd0c7fc9cfe303ae001f7da8fbfd0af06e1be064edf1e"} Feb 27 16:57:10 crc kubenswrapper[4751]: I0227 16:57:10.071611 4751 scope.go:117] "RemoveContainer" containerID="d277115def547410e1c0d6f98c56e4de6bf8541cc051355b9aa19107e1f29169" Feb 27 16:57:10 crc kubenswrapper[4751]: I0227 16:57:10.125597 4751 scope.go:117] "RemoveContainer" containerID="49f110af7ffc3a3eed6e27bf2968a8f7e5486e9e66ba6e747464c41f78ac45fb" Feb 27 16:57:10 crc kubenswrapper[4751]: I0227 16:57:10.131206 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mflxp"] Feb 27 16:57:10 crc kubenswrapper[4751]: I0227 16:57:10.135502 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-mflxp"] Feb 27 16:57:10 crc kubenswrapper[4751]: I0227 16:57:10.156229 4751 scope.go:117] "RemoveContainer" containerID="19d0d2c852a446b8ba8b7057647f7e7328ca26c3d7a721f46469364db458b746" Feb 27 16:57:10 crc kubenswrapper[4751]: I0227 16:57:10.186339 4751 scope.go:117] "RemoveContainer" containerID="d277115def547410e1c0d6f98c56e4de6bf8541cc051355b9aa19107e1f29169" Feb 27 16:57:10 crc kubenswrapper[4751]: E0227 16:57:10.186914 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d277115def547410e1c0d6f98c56e4de6bf8541cc051355b9aa19107e1f29169\": container with ID starting with d277115def547410e1c0d6f98c56e4de6bf8541cc051355b9aa19107e1f29169 not found: ID does not exist" containerID="d277115def547410e1c0d6f98c56e4de6bf8541cc051355b9aa19107e1f29169" Feb 27 16:57:10 crc kubenswrapper[4751]: I0227 16:57:10.186974 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d277115def547410e1c0d6f98c56e4de6bf8541cc051355b9aa19107e1f29169"} err="failed to get container status \"d277115def547410e1c0d6f98c56e4de6bf8541cc051355b9aa19107e1f29169\": rpc error: code = NotFound desc = could not find container \"d277115def547410e1c0d6f98c56e4de6bf8541cc051355b9aa19107e1f29169\": container with ID starting with d277115def547410e1c0d6f98c56e4de6bf8541cc051355b9aa19107e1f29169 not found: ID does not exist" Feb 27 16:57:10 crc 
kubenswrapper[4751]: I0227 16:57:10.187021 4751 scope.go:117] "RemoveContainer" containerID="49f110af7ffc3a3eed6e27bf2968a8f7e5486e9e66ba6e747464c41f78ac45fb" Feb 27 16:57:10 crc kubenswrapper[4751]: E0227 16:57:10.187430 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49f110af7ffc3a3eed6e27bf2968a8f7e5486e9e66ba6e747464c41f78ac45fb\": container with ID starting with 49f110af7ffc3a3eed6e27bf2968a8f7e5486e9e66ba6e747464c41f78ac45fb not found: ID does not exist" containerID="49f110af7ffc3a3eed6e27bf2968a8f7e5486e9e66ba6e747464c41f78ac45fb" Feb 27 16:57:10 crc kubenswrapper[4751]: I0227 16:57:10.187499 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49f110af7ffc3a3eed6e27bf2968a8f7e5486e9e66ba6e747464c41f78ac45fb"} err="failed to get container status \"49f110af7ffc3a3eed6e27bf2968a8f7e5486e9e66ba6e747464c41f78ac45fb\": rpc error: code = NotFound desc = could not find container \"49f110af7ffc3a3eed6e27bf2968a8f7e5486e9e66ba6e747464c41f78ac45fb\": container with ID starting with 49f110af7ffc3a3eed6e27bf2968a8f7e5486e9e66ba6e747464c41f78ac45fb not found: ID does not exist" Feb 27 16:57:10 crc kubenswrapper[4751]: I0227 16:57:10.187524 4751 scope.go:117] "RemoveContainer" containerID="19d0d2c852a446b8ba8b7057647f7e7328ca26c3d7a721f46469364db458b746" Feb 27 16:57:10 crc kubenswrapper[4751]: E0227 16:57:10.187860 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19d0d2c852a446b8ba8b7057647f7e7328ca26c3d7a721f46469364db458b746\": container with ID starting with 19d0d2c852a446b8ba8b7057647f7e7328ca26c3d7a721f46469364db458b746 not found: ID does not exist" containerID="19d0d2c852a446b8ba8b7057647f7e7328ca26c3d7a721f46469364db458b746" Feb 27 16:57:10 crc kubenswrapper[4751]: I0227 16:57:10.187898 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19d0d2c852a446b8ba8b7057647f7e7328ca26c3d7a721f46469364db458b746"} err="failed to get container status \"19d0d2c852a446b8ba8b7057647f7e7328ca26c3d7a721f46469364db458b746\": rpc error: code = NotFound desc = could not find container \"19d0d2c852a446b8ba8b7057647f7e7328ca26c3d7a721f46469364db458b746\": container with ID starting with 19d0d2c852a446b8ba8b7057647f7e7328ca26c3d7a721f46469364db458b746 not found: ID does not exist" Feb 27 16:57:10 crc kubenswrapper[4751]: I0227 16:57:10.531368 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7cbcbeaa-ede2-4e97-9a6f-1db15064af7c" path="/var/lib/kubelet/pods/7cbcbeaa-ede2-4e97-9a6f-1db15064af7c/volumes" Feb 27 16:57:11 crc kubenswrapper[4751]: I0227 16:57:11.520439 4751 scope.go:117] "RemoveContainer" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 16:57:11 crc kubenswrapper[4751]: E0227 16:57:11.521207 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:57:26 crc kubenswrapper[4751]: I0227 16:57:26.521756 4751 scope.go:117] "RemoveContainer" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" 
Feb 27 16:57:26 crc kubenswrapper[4751]: E0227 16:57:26.522962 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:57:40 crc kubenswrapper[4751]: I0227 16:57:40.532986 4751 scope.go:117] "RemoveContainer" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 16:57:40 crc kubenswrapper[4751]: E0227 16:57:40.535068 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:57:53 crc kubenswrapper[4751]: I0227 16:57:53.521171 4751 scope.go:117] "RemoveContainer" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 16:57:53 crc kubenswrapper[4751]: E0227 16:57:53.522368 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:58:00 crc kubenswrapper[4751]: I0227 16:58:00.163844 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536858-wjm4x"] Feb 27 16:58:00 crc kubenswrapper[4751]: E0227 16:58:00.164987 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cbcbeaa-ede2-4e97-9a6f-1db15064af7c" containerName="extract-content" Feb 27 16:58:00 crc kubenswrapper[4751]: I0227 16:58:00.165010 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cbcbeaa-ede2-4e97-9a6f-1db15064af7c" containerName="extract-content" Feb 27 16:58:00 crc kubenswrapper[4751]: E0227 16:58:00.165036 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cbcbeaa-ede2-4e97-9a6f-1db15064af7c" containerName="extract-utilities" Feb 27 16:58:00 crc kubenswrapper[4751]: I0227 16:58:00.165048 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cbcbeaa-ede2-4e97-9a6f-1db15064af7c" containerName="extract-utilities" Feb 27 16:58:00 crc kubenswrapper[4751]: E0227 16:58:00.165080 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="979fc3a9-29bb-4dee-9393-d16007946233" containerName="extract-utilities" Feb 27 16:58:00 crc kubenswrapper[4751]: I0227 16:58:00.165093 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="979fc3a9-29bb-4dee-9393-d16007946233" containerName="extract-utilities" Feb 27 16:58:00 crc kubenswrapper[4751]: E0227 16:58:00.165110 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="979fc3a9-29bb-4dee-9393-d16007946233" containerName="registry-server" Feb 27 16:58:00 crc kubenswrapper[4751]: I0227 16:58:00.165122 4751 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="979fc3a9-29bb-4dee-9393-d16007946233" containerName="registry-server" Feb 27 16:58:00 crc kubenswrapper[4751]: E0227 16:58:00.165159 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cbcbeaa-ede2-4e97-9a6f-1db15064af7c" containerName="registry-server" Feb 27 16:58:00 crc kubenswrapper[4751]: I0227 16:58:00.165173 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cbcbeaa-ede2-4e97-9a6f-1db15064af7c" containerName="registry-server" Feb 27 16:58:00 crc kubenswrapper[4751]: E0227 16:58:00.165198 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="979fc3a9-29bb-4dee-9393-d16007946233" containerName="extract-content" Feb 27 16:58:00 crc kubenswrapper[4751]: I0227 16:58:00.165210 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="979fc3a9-29bb-4dee-9393-d16007946233" containerName="extract-content" Feb 27 16:58:00 crc kubenswrapper[4751]: I0227 16:58:00.165509 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="7cbcbeaa-ede2-4e97-9a6f-1db15064af7c" containerName="registry-server" Feb 27 16:58:00 crc kubenswrapper[4751]: I0227 16:58:00.165536 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="979fc3a9-29bb-4dee-9393-d16007946233" containerName="registry-server" Feb 27 16:58:00 crc kubenswrapper[4751]: I0227 16:58:00.166359 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536858-wjm4x" Feb 27 16:58:00 crc kubenswrapper[4751]: I0227 16:58:00.169602 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 16:58:00 crc kubenswrapper[4751]: I0227 16:58:00.174695 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 16:58:00 crc kubenswrapper[4751]: I0227 16:58:00.174851 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 16:58:00 crc kubenswrapper[4751]: I0227 16:58:00.183971 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtswg\" (UniqueName: \"kubernetes.io/projected/963b1d1a-a6ba-49e0-b459-e1069fc5df85-kube-api-access-gtswg\") pod \"auto-csr-approver-29536858-wjm4x\" (UID: \"963b1d1a-a6ba-49e0-b459-e1069fc5df85\") " pod="openshift-infra/auto-csr-approver-29536858-wjm4x" Feb 27 16:58:00 crc kubenswrapper[4751]: I0227 16:58:00.190363 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536858-wjm4x"] Feb 27 16:58:00 crc kubenswrapper[4751]: I0227 16:58:00.285878 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtswg\" (UniqueName: \"kubernetes.io/projected/963b1d1a-a6ba-49e0-b459-e1069fc5df85-kube-api-access-gtswg\") pod \"auto-csr-approver-29536858-wjm4x\" (UID: \"963b1d1a-a6ba-49e0-b459-e1069fc5df85\") " pod="openshift-infra/auto-csr-approver-29536858-wjm4x" Feb 27 16:58:00 crc kubenswrapper[4751]: I0227 16:58:00.321071 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtswg\" (UniqueName: \"kubernetes.io/projected/963b1d1a-a6ba-49e0-b459-e1069fc5df85-kube-api-access-gtswg\") pod \"auto-csr-approver-29536858-wjm4x\" (UID: \"963b1d1a-a6ba-49e0-b459-e1069fc5df85\") " pod="openshift-infra/auto-csr-approver-29536858-wjm4x" Feb 27 16:58:00 crc kubenswrapper[4751]: I0227 16:58:00.494952 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536858-wjm4x" Feb 27 16:58:00 crc kubenswrapper[4751]: I0227 16:58:00.757494 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536858-wjm4x"] Feb 27 16:58:01 crc kubenswrapper[4751]: I0227 16:58:01.563227 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536858-wjm4x" event={"ID":"963b1d1a-a6ba-49e0-b459-e1069fc5df85","Type":"ContainerStarted","Data":"57d89e7ea8eb5d9bc9fe43fc8a96a52856a891c479d2dbd8468e1d1105dadc8e"} Feb 27 16:58:02 crc kubenswrapper[4751]: I0227 16:58:02.575049 4751 generic.go:334] "Generic (PLEG): container finished" podID="963b1d1a-a6ba-49e0-b459-e1069fc5df85" containerID="cb31a36defa79267ab510bc7b5af87bf5f72f5b361526e138b371c03160885fb" exitCode=0 Feb 27 16:58:02 crc kubenswrapper[4751]: I0227 16:58:02.575171 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536858-wjm4x" event={"ID":"963b1d1a-a6ba-49e0-b459-e1069fc5df85","Type":"ContainerDied","Data":"cb31a36defa79267ab510bc7b5af87bf5f72f5b361526e138b371c03160885fb"} Feb 27 16:58:04 crc kubenswrapper[4751]: I0227 16:58:04.021268 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536858-wjm4x" Feb 27 16:58:04 crc kubenswrapper[4751]: I0227 16:58:04.150378 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtswg\" (UniqueName: \"kubernetes.io/projected/963b1d1a-a6ba-49e0-b459-e1069fc5df85-kube-api-access-gtswg\") pod \"963b1d1a-a6ba-49e0-b459-e1069fc5df85\" (UID: \"963b1d1a-a6ba-49e0-b459-e1069fc5df85\") " Feb 27 16:58:04 crc kubenswrapper[4751]: I0227 16:58:04.157592 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/963b1d1a-a6ba-49e0-b459-e1069fc5df85-kube-api-access-gtswg" (OuterVolumeSpecName: "kube-api-access-gtswg") pod "963b1d1a-a6ba-49e0-b459-e1069fc5df85" (UID: "963b1d1a-a6ba-49e0-b459-e1069fc5df85"). InnerVolumeSpecName "kube-api-access-gtswg". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 16:58:04 crc kubenswrapper[4751]: I0227 16:58:04.253472 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtswg\" (UniqueName: \"kubernetes.io/projected/963b1d1a-a6ba-49e0-b459-e1069fc5df85-kube-api-access-gtswg\") on node \"crc\" DevicePath \"\"" Feb 27 16:58:04 crc kubenswrapper[4751]: I0227 16:58:04.599445 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536858-wjm4x" event={"ID":"963b1d1a-a6ba-49e0-b459-e1069fc5df85","Type":"ContainerDied","Data":"57d89e7ea8eb5d9bc9fe43fc8a96a52856a891c479d2dbd8468e1d1105dadc8e"} Feb 27 16:58:04 crc kubenswrapper[4751]: I0227 16:58:04.599937 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="57d89e7ea8eb5d9bc9fe43fc8a96a52856a891c479d2dbd8468e1d1105dadc8e" Feb 27 16:58:04 crc kubenswrapper[4751]: I0227 16:58:04.599489 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536858-wjm4x" Feb 27 16:58:04 crc kubenswrapper[4751]: E0227 16:58:04.622863 4751 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod963b1d1a_a6ba_49e0_b459_e1069fc5df85.slice\": RecentStats: unable to find data in memory cache]" Feb 27 16:58:05 crc kubenswrapper[4751]: I0227 16:58:05.126312 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536852-v2d8d"] Feb 27 16:58:05 crc kubenswrapper[4751]: I0227 16:58:05.133423 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536852-v2d8d"] Feb 27 16:58:06 crc kubenswrapper[4751]: I0227 16:58:06.535644 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a97ead17-2e26-4c04-bfda-fa3646505664" path="/var/lib/kubelet/pods/a97ead17-2e26-4c04-bfda-fa3646505664/volumes" Feb 27 16:58:07 crc kubenswrapper[4751]: I0227 16:58:07.521360 4751 scope.go:117] "RemoveContainer" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 16:58:07 crc kubenswrapper[4751]: E0227 16:58:07.521804 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:58:21 crc kubenswrapper[4751]: I0227 16:58:21.520376 4751 scope.go:117] "RemoveContainer" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 16:58:21 crc kubenswrapper[4751]: E0227 16:58:21.521460 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 16:58:33 crc kubenswrapper[4751]: I0227 16:58:33.521101 4751 scope.go:117] "RemoveContainer" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 16:58:33 crc kubenswrapper[4751]: I0227 16:58:33.847571 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerStarted","Data":"df4b103e4664bc2214fc36d6fc974510e33084f406a93d660aee2115d6a78486"} Feb 27 16:58:48 crc kubenswrapper[4751]: I0227 16:58:48.614350 4751 scope.go:117] "RemoveContainer" containerID="e33f8dcfabad6257dca49dcc012b78566d47a9e5059878b90ff84ffe841eb9ca" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.146491 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536860-xnp4r"] Feb 27 17:00:00 crc kubenswrapper[4751]: E0227 17:00:00.147247 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="963b1d1a-a6ba-49e0-b459-e1069fc5df85" containerName="oc" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.147261 4751 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="963b1d1a-a6ba-49e0-b459-e1069fc5df85" containerName="oc" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.147385 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="963b1d1a-a6ba-49e0-b459-e1069fc5df85" containerName="oc" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.147897 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536860-xnp4r" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.151972 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.152061 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.157842 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.158359 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536860-xnp4r"] Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.253997 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536860-bh2g4"] Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.255786 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536860-bh2g4" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.258152 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.258708 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.263321 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536860-bh2g4"] Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.282135 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-clmvh\" (UniqueName: \"kubernetes.io/projected/30714fd1-3839-4710-ba0e-1f5325b60efb-kube-api-access-clmvh\") pod \"auto-csr-approver-29536860-xnp4r\" (UID: \"30714fd1-3839-4710-ba0e-1f5325b60efb\") " pod="openshift-infra/auto-csr-approver-29536860-xnp4r" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.383739 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgz9x\" (UniqueName: \"kubernetes.io/projected/dd785ec2-2ac7-47fb-bc33-07aa127fe1d0-kube-api-access-rgz9x\") pod \"collect-profiles-29536860-bh2g4\" (UID: \"dd785ec2-2ac7-47fb-bc33-07aa127fe1d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536860-bh2g4" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.383883 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-clmvh\" (UniqueName: \"kubernetes.io/projected/30714fd1-3839-4710-ba0e-1f5325b60efb-kube-api-access-clmvh\") pod \"auto-csr-approver-29536860-xnp4r\" (UID: \"30714fd1-3839-4710-ba0e-1f5325b60efb\") " pod="openshift-infra/auto-csr-approver-29536860-xnp4r" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.383945 4751 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dd785ec2-2ac7-47fb-bc33-07aa127fe1d0-secret-volume\") pod \"collect-profiles-29536860-bh2g4\" (UID: \"dd785ec2-2ac7-47fb-bc33-07aa127fe1d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536860-bh2g4" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.384064 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dd785ec2-2ac7-47fb-bc33-07aa127fe1d0-config-volume\") pod \"collect-profiles-29536860-bh2g4\" (UID: \"dd785ec2-2ac7-47fb-bc33-07aa127fe1d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536860-bh2g4" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.403259 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-clmvh\" (UniqueName: \"kubernetes.io/projected/30714fd1-3839-4710-ba0e-1f5325b60efb-kube-api-access-clmvh\") pod \"auto-csr-approver-29536860-xnp4r\" (UID: \"30714fd1-3839-4710-ba0e-1f5325b60efb\") " pod="openshift-infra/auto-csr-approver-29536860-xnp4r" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.483794 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536860-xnp4r" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.485461 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dd785ec2-2ac7-47fb-bc33-07aa127fe1d0-config-volume\") pod \"collect-profiles-29536860-bh2g4\" (UID: \"dd785ec2-2ac7-47fb-bc33-07aa127fe1d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536860-bh2g4" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.485572 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rgz9x\" (UniqueName: \"kubernetes.io/projected/dd785ec2-2ac7-47fb-bc33-07aa127fe1d0-kube-api-access-rgz9x\") pod \"collect-profiles-29536860-bh2g4\" (UID: \"dd785ec2-2ac7-47fb-bc33-07aa127fe1d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536860-bh2g4" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.485616 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dd785ec2-2ac7-47fb-bc33-07aa127fe1d0-secret-volume\") pod \"collect-profiles-29536860-bh2g4\" (UID: \"dd785ec2-2ac7-47fb-bc33-07aa127fe1d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536860-bh2g4" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.487088 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dd785ec2-2ac7-47fb-bc33-07aa127fe1d0-config-volume\") pod \"collect-profiles-29536860-bh2g4\" (UID: \"dd785ec2-2ac7-47fb-bc33-07aa127fe1d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536860-bh2g4" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.491675 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dd785ec2-2ac7-47fb-bc33-07aa127fe1d0-secret-volume\") pod \"collect-profiles-29536860-bh2g4\" (UID: \"dd785ec2-2ac7-47fb-bc33-07aa127fe1d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536860-bh2g4" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 
17:00:00.505832 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgz9x\" (UniqueName: \"kubernetes.io/projected/dd785ec2-2ac7-47fb-bc33-07aa127fe1d0-kube-api-access-rgz9x\") pod \"collect-profiles-29536860-bh2g4\" (UID: \"dd785ec2-2ac7-47fb-bc33-07aa127fe1d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536860-bh2g4" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.581883 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536860-bh2g4" Feb 27 17:00:00 crc kubenswrapper[4751]: I0227 17:00:00.957079 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536860-xnp4r"] Feb 27 17:00:00 crc kubenswrapper[4751]: W0227 17:00:00.964586 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod30714fd1_3839_4710_ba0e_1f5325b60efb.slice/crio-09e59f02b7f6bbbf69703fca8c29b750ea22e5d7308fd8f99ac3d2c04e734529 WatchSource:0}: Error finding container 09e59f02b7f6bbbf69703fca8c29b750ea22e5d7308fd8f99ac3d2c04e734529: Status 404 returned error can't find the container with id 09e59f02b7f6bbbf69703fca8c29b750ea22e5d7308fd8f99ac3d2c04e734529 Feb 27 17:00:01 crc kubenswrapper[4751]: W0227 17:00:01.020055 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddd785ec2_2ac7_47fb_bc33_07aa127fe1d0.slice/crio-ea4a46bb90f8a99aaba0ba3113feac4eb8d1ef201f510b352adfc5c763d53e9a WatchSource:0}: Error finding container ea4a46bb90f8a99aaba0ba3113feac4eb8d1ef201f510b352adfc5c763d53e9a: Status 404 returned error can't find the container with id ea4a46bb90f8a99aaba0ba3113feac4eb8d1ef201f510b352adfc5c763d53e9a Feb 27 17:00:01 crc kubenswrapper[4751]: I0227 17:00:01.020139 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536860-bh2g4"] Feb 27 17:00:01 crc kubenswrapper[4751]: I0227 17:00:01.681037 4751 generic.go:334] "Generic (PLEG): container finished" podID="dd785ec2-2ac7-47fb-bc33-07aa127fe1d0" containerID="71bd1940900031bf1ce66d5f9d67f7d46233883d0b3818018ae7387976401f17" exitCode=0 Feb 27 17:00:01 crc kubenswrapper[4751]: I0227 17:00:01.681131 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536860-bh2g4" event={"ID":"dd785ec2-2ac7-47fb-bc33-07aa127fe1d0","Type":"ContainerDied","Data":"71bd1940900031bf1ce66d5f9d67f7d46233883d0b3818018ae7387976401f17"} Feb 27 17:00:01 crc kubenswrapper[4751]: I0227 17:00:01.681491 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536860-bh2g4" event={"ID":"dd785ec2-2ac7-47fb-bc33-07aa127fe1d0","Type":"ContainerStarted","Data":"ea4a46bb90f8a99aaba0ba3113feac4eb8d1ef201f510b352adfc5c763d53e9a"} Feb 27 17:00:01 crc kubenswrapper[4751]: I0227 17:00:01.684266 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536860-xnp4r" event={"ID":"30714fd1-3839-4710-ba0e-1f5325b60efb","Type":"ContainerStarted","Data":"09e59f02b7f6bbbf69703fca8c29b750ea22e5d7308fd8f99ac3d2c04e734529"} Feb 27 17:00:03 crc kubenswrapper[4751]: I0227 17:00:03.077193 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536860-bh2g4" Feb 27 17:00:03 crc kubenswrapper[4751]: I0227 17:00:03.135557 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rgz9x\" (UniqueName: \"kubernetes.io/projected/dd785ec2-2ac7-47fb-bc33-07aa127fe1d0-kube-api-access-rgz9x\") pod \"dd785ec2-2ac7-47fb-bc33-07aa127fe1d0\" (UID: \"dd785ec2-2ac7-47fb-bc33-07aa127fe1d0\") " Feb 27 17:00:03 crc kubenswrapper[4751]: I0227 17:00:03.135831 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dd785ec2-2ac7-47fb-bc33-07aa127fe1d0-config-volume\") pod \"dd785ec2-2ac7-47fb-bc33-07aa127fe1d0\" (UID: \"dd785ec2-2ac7-47fb-bc33-07aa127fe1d0\") " Feb 27 17:00:03 crc kubenswrapper[4751]: I0227 17:00:03.135878 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dd785ec2-2ac7-47fb-bc33-07aa127fe1d0-secret-volume\") pod \"dd785ec2-2ac7-47fb-bc33-07aa127fe1d0\" (UID: \"dd785ec2-2ac7-47fb-bc33-07aa127fe1d0\") " Feb 27 17:00:03 crc kubenswrapper[4751]: I0227 17:00:03.136527 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd785ec2-2ac7-47fb-bc33-07aa127fe1d0-config-volume" (OuterVolumeSpecName: "config-volume") pod "dd785ec2-2ac7-47fb-bc33-07aa127fe1d0" (UID: "dd785ec2-2ac7-47fb-bc33-07aa127fe1d0"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 17:00:03 crc kubenswrapper[4751]: I0227 17:00:03.141263 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd785ec2-2ac7-47fb-bc33-07aa127fe1d0-kube-api-access-rgz9x" (OuterVolumeSpecName: "kube-api-access-rgz9x") pod "dd785ec2-2ac7-47fb-bc33-07aa127fe1d0" (UID: "dd785ec2-2ac7-47fb-bc33-07aa127fe1d0"). InnerVolumeSpecName "kube-api-access-rgz9x". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:00:03 crc kubenswrapper[4751]: I0227 17:00:03.141883 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd785ec2-2ac7-47fb-bc33-07aa127fe1d0-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "dd785ec2-2ac7-47fb-bc33-07aa127fe1d0" (UID: "dd785ec2-2ac7-47fb-bc33-07aa127fe1d0"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 17:00:03 crc kubenswrapper[4751]: I0227 17:00:03.238662 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rgz9x\" (UniqueName: \"kubernetes.io/projected/dd785ec2-2ac7-47fb-bc33-07aa127fe1d0-kube-api-access-rgz9x\") on node \"crc\" DevicePath \"\"" Feb 27 17:00:03 crc kubenswrapper[4751]: I0227 17:00:03.238703 4751 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dd785ec2-2ac7-47fb-bc33-07aa127fe1d0-config-volume\") on node \"crc\" DevicePath \"\"" Feb 27 17:00:03 crc kubenswrapper[4751]: I0227 17:00:03.238716 4751 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dd785ec2-2ac7-47fb-bc33-07aa127fe1d0-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 27 17:00:03 crc kubenswrapper[4751]: I0227 17:00:03.703720 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536860-bh2g4" Feb 27 17:00:03 crc kubenswrapper[4751]: I0227 17:00:03.703773 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536860-bh2g4" event={"ID":"dd785ec2-2ac7-47fb-bc33-07aa127fe1d0","Type":"ContainerDied","Data":"ea4a46bb90f8a99aaba0ba3113feac4eb8d1ef201f510b352adfc5c763d53e9a"} Feb 27 17:00:03 crc kubenswrapper[4751]: I0227 17:00:03.704190 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ea4a46bb90f8a99aaba0ba3113feac4eb8d1ef201f510b352adfc5c763d53e9a" Feb 27 17:00:03 crc kubenswrapper[4751]: I0227 17:00:03.705516 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536860-xnp4r" event={"ID":"30714fd1-3839-4710-ba0e-1f5325b60efb","Type":"ContainerStarted","Data":"ac1de1c579943b9ab36bd29820c25a775206ba68b080001b6fea99caa02dbc31"} Feb 27 17:00:03 crc kubenswrapper[4751]: I0227 17:00:03.721378 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-infra/auto-csr-approver-29536860-xnp4r" podStartSLOduration=1.393527298 podStartE2EDuration="3.721355691s" podCreationTimestamp="2026-02-27 17:00:00 +0000 UTC" firstStartedPulling="2026-02-27 17:00:00.966931432 +0000 UTC m=+2163.113945879" lastFinishedPulling="2026-02-27 17:00:03.294759815 +0000 UTC m=+2165.441774272" observedRunningTime="2026-02-27 17:00:03.719019699 +0000 UTC m=+2165.866034176" watchObservedRunningTime="2026-02-27 17:00:03.721355691 +0000 UTC m=+2165.868370168" Feb 27 17:00:04 crc kubenswrapper[4751]: I0227 17:00:04.173267 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536815-fc4ph"] Feb 27 17:00:04 crc kubenswrapper[4751]: I0227 17:00:04.184579 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536815-fc4ph"] Feb 27 17:00:04 crc kubenswrapper[4751]: I0227 17:00:04.530351 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2680126d-1cf3-4cbd-a130-3d8d0070a394" path="/var/lib/kubelet/pods/2680126d-1cf3-4cbd-a130-3d8d0070a394/volumes" Feb 27 17:00:04 crc kubenswrapper[4751]: I0227 17:00:04.733045 4751 generic.go:334] "Generic (PLEG): container finished" podID="30714fd1-3839-4710-ba0e-1f5325b60efb" containerID="ac1de1c579943b9ab36bd29820c25a775206ba68b080001b6fea99caa02dbc31" exitCode=0 Feb 27 17:00:04 crc kubenswrapper[4751]: I0227 17:00:04.733516 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536860-xnp4r" event={"ID":"30714fd1-3839-4710-ba0e-1f5325b60efb","Type":"ContainerDied","Data":"ac1de1c579943b9ab36bd29820c25a775206ba68b080001b6fea99caa02dbc31"} Feb 27 17:00:06 crc kubenswrapper[4751]: I0227 17:00:06.049144 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536860-xnp4r" Feb 27 17:00:06 crc kubenswrapper[4751]: I0227 17:00:06.183933 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-clmvh\" (UniqueName: \"kubernetes.io/projected/30714fd1-3839-4710-ba0e-1f5325b60efb-kube-api-access-clmvh\") pod \"30714fd1-3839-4710-ba0e-1f5325b60efb\" (UID: \"30714fd1-3839-4710-ba0e-1f5325b60efb\") " Feb 27 17:00:06 crc kubenswrapper[4751]: I0227 17:00:06.190735 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30714fd1-3839-4710-ba0e-1f5325b60efb-kube-api-access-clmvh" (OuterVolumeSpecName: "kube-api-access-clmvh") pod "30714fd1-3839-4710-ba0e-1f5325b60efb" (UID: "30714fd1-3839-4710-ba0e-1f5325b60efb"). InnerVolumeSpecName "kube-api-access-clmvh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:00:06 crc kubenswrapper[4751]: I0227 17:00:06.286114 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-clmvh\" (UniqueName: \"kubernetes.io/projected/30714fd1-3839-4710-ba0e-1f5325b60efb-kube-api-access-clmvh\") on node \"crc\" DevicePath \"\"" Feb 27 17:00:06 crc kubenswrapper[4751]: I0227 17:00:06.755778 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536860-xnp4r" event={"ID":"30714fd1-3839-4710-ba0e-1f5325b60efb","Type":"ContainerDied","Data":"09e59f02b7f6bbbf69703fca8c29b750ea22e5d7308fd8f99ac3d2c04e734529"} Feb 27 17:00:06 crc kubenswrapper[4751]: I0227 17:00:06.755830 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="09e59f02b7f6bbbf69703fca8c29b750ea22e5d7308fd8f99ac3d2c04e734529" Feb 27 17:00:06 crc kubenswrapper[4751]: I0227 17:00:06.755876 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536860-xnp4r" Feb 27 17:00:06 crc kubenswrapper[4751]: I0227 17:00:06.797636 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536854-2ml7s"] Feb 27 17:00:06 crc kubenswrapper[4751]: I0227 17:00:06.803607 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536854-2ml7s"] Feb 27 17:00:08 crc kubenswrapper[4751]: I0227 17:00:08.535215 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b41c6f8b-2a1f-4b76-8343-4907b52fb82c" path="/var/lib/kubelet/pods/b41c6f8b-2a1f-4b76-8343-4907b52fb82c/volumes" Feb 27 17:00:48 crc kubenswrapper[4751]: I0227 17:00:48.727355 4751 scope.go:117] "RemoveContainer" containerID="38f9d3e3e1e837826613fae5a0ad31b0cbb1d346cd35d3364c567c6b64b60399" Feb 27 17:00:48 crc kubenswrapper[4751]: I0227 17:00:48.762123 4751 scope.go:117] "RemoveContainer" containerID="1efdf364fba1948dedcdc72d9b5c9a673f03eb81f377c55f4e2eb61a2994ef00" Feb 27 17:00:58 crc kubenswrapper[4751]: I0227 17:00:58.918010 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:00:58 crc kubenswrapper[4751]: I0227 17:00:58.919049 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:01:26 crc kubenswrapper[4751]: I0227 17:01:26.043446 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-hdkgk"] Feb 27 17:01:26 crc kubenswrapper[4751]: E0227 17:01:26.044234 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd785ec2-2ac7-47fb-bc33-07aa127fe1d0" containerName="collect-profiles" Feb 27 17:01:26 crc kubenswrapper[4751]: I0227 17:01:26.044250 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd785ec2-2ac7-47fb-bc33-07aa127fe1d0" containerName="collect-profiles" Feb 27 17:01:26 crc kubenswrapper[4751]: E0227 17:01:26.044273 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30714fd1-3839-4710-ba0e-1f5325b60efb" containerName="oc" Feb 27 17:01:26 crc kubenswrapper[4751]: I0227 17:01:26.044283 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="30714fd1-3839-4710-ba0e-1f5325b60efb" containerName="oc" Feb 27 17:01:26 crc kubenswrapper[4751]: I0227 17:01:26.044482 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="30714fd1-3839-4710-ba0e-1f5325b60efb" containerName="oc" Feb 27 17:01:26 crc kubenswrapper[4751]: I0227 17:01:26.044498 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd785ec2-2ac7-47fb-bc33-07aa127fe1d0" containerName="collect-profiles" Feb 27 17:01:26 crc kubenswrapper[4751]: I0227 17:01:26.045628 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hdkgk" Feb 27 17:01:26 crc kubenswrapper[4751]: I0227 17:01:26.059381 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hdkgk"] Feb 27 17:01:26 crc kubenswrapper[4751]: I0227 17:01:26.197113 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bdb1e46-a70e-4981-8ed2-0c9a5142c095-utilities\") pod \"certified-operators-hdkgk\" (UID: \"6bdb1e46-a70e-4981-8ed2-0c9a5142c095\") " pod="openshift-marketplace/certified-operators-hdkgk" Feb 27 17:01:26 crc kubenswrapper[4751]: I0227 17:01:26.197425 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bdb1e46-a70e-4981-8ed2-0c9a5142c095-catalog-content\") pod \"certified-operators-hdkgk\" (UID: \"6bdb1e46-a70e-4981-8ed2-0c9a5142c095\") " pod="openshift-marketplace/certified-operators-hdkgk" Feb 27 17:01:26 crc kubenswrapper[4751]: I0227 17:01:26.197700 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxdkw\" (UniqueName: \"kubernetes.io/projected/6bdb1e46-a70e-4981-8ed2-0c9a5142c095-kube-api-access-bxdkw\") pod \"certified-operators-hdkgk\" (UID: \"6bdb1e46-a70e-4981-8ed2-0c9a5142c095\") " pod="openshift-marketplace/certified-operators-hdkgk" Feb 27 17:01:26 crc kubenswrapper[4751]: I0227 17:01:26.299438 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bdb1e46-a70e-4981-8ed2-0c9a5142c095-utilities\") pod \"certified-operators-hdkgk\" (UID: \"6bdb1e46-a70e-4981-8ed2-0c9a5142c095\") " pod="openshift-marketplace/certified-operators-hdkgk" Feb 27 17:01:26 crc kubenswrapper[4751]: I0227 17:01:26.299828 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bdb1e46-a70e-4981-8ed2-0c9a5142c095-catalog-content\") pod \"certified-operators-hdkgk\" (UID: \"6bdb1e46-a70e-4981-8ed2-0c9a5142c095\") " pod="openshift-marketplace/certified-operators-hdkgk" Feb 27 17:01:26 crc kubenswrapper[4751]: I0227 17:01:26.300006 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxdkw\" (UniqueName: \"kubernetes.io/projected/6bdb1e46-a70e-4981-8ed2-0c9a5142c095-kube-api-access-bxdkw\") pod \"certified-operators-hdkgk\" (UID: \"6bdb1e46-a70e-4981-8ed2-0c9a5142c095\") " pod="openshift-marketplace/certified-operators-hdkgk" Feb 27 17:01:26 crc kubenswrapper[4751]: I0227 17:01:26.300221 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bdb1e46-a70e-4981-8ed2-0c9a5142c095-catalog-content\") pod \"certified-operators-hdkgk\" (UID: \"6bdb1e46-a70e-4981-8ed2-0c9a5142c095\") " pod="openshift-marketplace/certified-operators-hdkgk" Feb 27 17:01:26 crc kubenswrapper[4751]: I0227 17:01:26.300021 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bdb1e46-a70e-4981-8ed2-0c9a5142c095-utilities\") pod \"certified-operators-hdkgk\" (UID: \"6bdb1e46-a70e-4981-8ed2-0c9a5142c095\") " pod="openshift-marketplace/certified-operators-hdkgk" Feb 27 17:01:26 crc kubenswrapper[4751]: I0227 17:01:26.321264 4751 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-bxdkw\" (UniqueName: \"kubernetes.io/projected/6bdb1e46-a70e-4981-8ed2-0c9a5142c095-kube-api-access-bxdkw\") pod \"certified-operators-hdkgk\" (UID: \"6bdb1e46-a70e-4981-8ed2-0c9a5142c095\") " pod="openshift-marketplace/certified-operators-hdkgk" Feb 27 17:01:26 crc kubenswrapper[4751]: I0227 17:01:26.374741 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hdkgk" Feb 27 17:01:26 crc kubenswrapper[4751]: I0227 17:01:26.891477 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hdkgk"] Feb 27 17:01:26 crc kubenswrapper[4751]: W0227 17:01:26.901080 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6bdb1e46_a70e_4981_8ed2_0c9a5142c095.slice/crio-c5deeb8dd3aa93583bf54433ea90a5235e83a31a9c8cce58264d3c8a32ea5ea4 WatchSource:0}: Error finding container c5deeb8dd3aa93583bf54433ea90a5235e83a31a9c8cce58264d3c8a32ea5ea4: Status 404 returned error can't find the container with id c5deeb8dd3aa93583bf54433ea90a5235e83a31a9c8cce58264d3c8a32ea5ea4 Feb 27 17:01:27 crc kubenswrapper[4751]: I0227 17:01:27.562005 4751 generic.go:334] "Generic (PLEG): container finished" podID="6bdb1e46-a70e-4981-8ed2-0c9a5142c095" containerID="0518fd500f9058115f678dd5acc1354c7429b8e80209acf42334c09f5b107692" exitCode=0 Feb 27 17:01:27 crc kubenswrapper[4751]: I0227 17:01:27.562070 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hdkgk" event={"ID":"6bdb1e46-a70e-4981-8ed2-0c9a5142c095","Type":"ContainerDied","Data":"0518fd500f9058115f678dd5acc1354c7429b8e80209acf42334c09f5b107692"} Feb 27 17:01:27 crc kubenswrapper[4751]: I0227 17:01:27.562112 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hdkgk" event={"ID":"6bdb1e46-a70e-4981-8ed2-0c9a5142c095","Type":"ContainerStarted","Data":"c5deeb8dd3aa93583bf54433ea90a5235e83a31a9c8cce58264d3c8a32ea5ea4"} Feb 27 17:01:27 crc kubenswrapper[4751]: I0227 17:01:27.565097 4751 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 27 17:01:28 crc kubenswrapper[4751]: I0227 17:01:28.917975 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:01:28 crc kubenswrapper[4751]: I0227 17:01:28.918275 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:01:29 crc kubenswrapper[4751]: I0227 17:01:29.586541 4751 generic.go:334] "Generic (PLEG): container finished" podID="6bdb1e46-a70e-4981-8ed2-0c9a5142c095" containerID="a83dd14881e1acebd7afd4b220367f999fba13e185dd2f8d63447ff72915d3b1" exitCode=0 Feb 27 17:01:29 crc kubenswrapper[4751]: I0227 17:01:29.586722 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hdkgk" 
event={"ID":"6bdb1e46-a70e-4981-8ed2-0c9a5142c095","Type":"ContainerDied","Data":"a83dd14881e1acebd7afd4b220367f999fba13e185dd2f8d63447ff72915d3b1"} Feb 27 17:01:30 crc kubenswrapper[4751]: I0227 17:01:30.600345 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hdkgk" event={"ID":"6bdb1e46-a70e-4981-8ed2-0c9a5142c095","Type":"ContainerStarted","Data":"ba21f34da8baa34c05fda4887699b7fb280db16eeb43a7c0a4ace82afe71662d"} Feb 27 17:01:30 crc kubenswrapper[4751]: I0227 17:01:30.634317 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-hdkgk" podStartSLOduration=2.096940735 podStartE2EDuration="4.634275075s" podCreationTimestamp="2026-02-27 17:01:26 +0000 UTC" firstStartedPulling="2026-02-27 17:01:27.564660746 +0000 UTC m=+2249.711675223" lastFinishedPulling="2026-02-27 17:01:30.101995086 +0000 UTC m=+2252.249009563" observedRunningTime="2026-02-27 17:01:30.625594074 +0000 UTC m=+2252.772608611" watchObservedRunningTime="2026-02-27 17:01:30.634275075 +0000 UTC m=+2252.781289562" Feb 27 17:01:36 crc kubenswrapper[4751]: I0227 17:01:36.375174 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-hdkgk" Feb 27 17:01:36 crc kubenswrapper[4751]: I0227 17:01:36.375853 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-hdkgk" Feb 27 17:01:36 crc kubenswrapper[4751]: I0227 17:01:36.430958 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-hdkgk" Feb 27 17:01:36 crc kubenswrapper[4751]: I0227 17:01:36.743561 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-hdkgk" Feb 27 17:01:36 crc kubenswrapper[4751]: I0227 17:01:36.815359 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hdkgk"] Feb 27 17:01:38 crc kubenswrapper[4751]: I0227 17:01:38.692736 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-hdkgk" podUID="6bdb1e46-a70e-4981-8ed2-0c9a5142c095" containerName="registry-server" containerID="cri-o://ba21f34da8baa34c05fda4887699b7fb280db16eeb43a7c0a4ace82afe71662d" gracePeriod=2 Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.259067 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hdkgk" Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.326979 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bxdkw\" (UniqueName: \"kubernetes.io/projected/6bdb1e46-a70e-4981-8ed2-0c9a5142c095-kube-api-access-bxdkw\") pod \"6bdb1e46-a70e-4981-8ed2-0c9a5142c095\" (UID: \"6bdb1e46-a70e-4981-8ed2-0c9a5142c095\") " Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.327109 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bdb1e46-a70e-4981-8ed2-0c9a5142c095-utilities\") pod \"6bdb1e46-a70e-4981-8ed2-0c9a5142c095\" (UID: \"6bdb1e46-a70e-4981-8ed2-0c9a5142c095\") " Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.327140 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bdb1e46-a70e-4981-8ed2-0c9a5142c095-catalog-content\") pod \"6bdb1e46-a70e-4981-8ed2-0c9a5142c095\" (UID: \"6bdb1e46-a70e-4981-8ed2-0c9a5142c095\") " Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.328112 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6bdb1e46-a70e-4981-8ed2-0c9a5142c095-utilities" (OuterVolumeSpecName: "utilities") pod "6bdb1e46-a70e-4981-8ed2-0c9a5142c095" (UID: "6bdb1e46-a70e-4981-8ed2-0c9a5142c095"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.333313 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6bdb1e46-a70e-4981-8ed2-0c9a5142c095-kube-api-access-bxdkw" (OuterVolumeSpecName: "kube-api-access-bxdkw") pod "6bdb1e46-a70e-4981-8ed2-0c9a5142c095" (UID: "6bdb1e46-a70e-4981-8ed2-0c9a5142c095"). InnerVolumeSpecName "kube-api-access-bxdkw". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.422796 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6bdb1e46-a70e-4981-8ed2-0c9a5142c095-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6bdb1e46-a70e-4981-8ed2-0c9a5142c095" (UID: "6bdb1e46-a70e-4981-8ed2-0c9a5142c095"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.428650 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bxdkw\" (UniqueName: \"kubernetes.io/projected/6bdb1e46-a70e-4981-8ed2-0c9a5142c095-kube-api-access-bxdkw\") on node \"crc\" DevicePath \"\"" Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.428682 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bdb1e46-a70e-4981-8ed2-0c9a5142c095-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.428694 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bdb1e46-a70e-4981-8ed2-0c9a5142c095-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.703876 4751 generic.go:334] "Generic (PLEG): container finished" podID="6bdb1e46-a70e-4981-8ed2-0c9a5142c095" containerID="ba21f34da8baa34c05fda4887699b7fb280db16eeb43a7c0a4ace82afe71662d" exitCode=0 Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.703924 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hdkgk" event={"ID":"6bdb1e46-a70e-4981-8ed2-0c9a5142c095","Type":"ContainerDied","Data":"ba21f34da8baa34c05fda4887699b7fb280db16eeb43a7c0a4ace82afe71662d"} Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.703957 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hdkgk" event={"ID":"6bdb1e46-a70e-4981-8ed2-0c9a5142c095","Type":"ContainerDied","Data":"c5deeb8dd3aa93583bf54433ea90a5235e83a31a9c8cce58264d3c8a32ea5ea4"} Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.703975 4751 scope.go:117] "RemoveContainer" containerID="ba21f34da8baa34c05fda4887699b7fb280db16eeb43a7c0a4ace82afe71662d" Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.704040 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hdkgk" Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.734013 4751 scope.go:117] "RemoveContainer" containerID="a83dd14881e1acebd7afd4b220367f999fba13e185dd2f8d63447ff72915d3b1" Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.752473 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hdkgk"] Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.768173 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-hdkgk"] Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.785037 4751 scope.go:117] "RemoveContainer" containerID="0518fd500f9058115f678dd5acc1354c7429b8e80209acf42334c09f5b107692" Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.805368 4751 scope.go:117] "RemoveContainer" containerID="ba21f34da8baa34c05fda4887699b7fb280db16eeb43a7c0a4ace82afe71662d" Feb 27 17:01:39 crc kubenswrapper[4751]: E0227 17:01:39.806089 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba21f34da8baa34c05fda4887699b7fb280db16eeb43a7c0a4ace82afe71662d\": container with ID starting with ba21f34da8baa34c05fda4887699b7fb280db16eeb43a7c0a4ace82afe71662d not found: ID does not exist" containerID="ba21f34da8baa34c05fda4887699b7fb280db16eeb43a7c0a4ace82afe71662d" Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.806122 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba21f34da8baa34c05fda4887699b7fb280db16eeb43a7c0a4ace82afe71662d"} err="failed to get container status \"ba21f34da8baa34c05fda4887699b7fb280db16eeb43a7c0a4ace82afe71662d\": rpc error: code = NotFound desc = could not find container \"ba21f34da8baa34c05fda4887699b7fb280db16eeb43a7c0a4ace82afe71662d\": container with ID starting with ba21f34da8baa34c05fda4887699b7fb280db16eeb43a7c0a4ace82afe71662d not found: ID does not exist" Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.806143 4751 scope.go:117] "RemoveContainer" containerID="a83dd14881e1acebd7afd4b220367f999fba13e185dd2f8d63447ff72915d3b1" Feb 27 17:01:39 crc kubenswrapper[4751]: E0227 17:01:39.806437 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a83dd14881e1acebd7afd4b220367f999fba13e185dd2f8d63447ff72915d3b1\": container with ID starting with a83dd14881e1acebd7afd4b220367f999fba13e185dd2f8d63447ff72915d3b1 not found: ID does not exist" containerID="a83dd14881e1acebd7afd4b220367f999fba13e185dd2f8d63447ff72915d3b1" Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.806458 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a83dd14881e1acebd7afd4b220367f999fba13e185dd2f8d63447ff72915d3b1"} err="failed to get container status \"a83dd14881e1acebd7afd4b220367f999fba13e185dd2f8d63447ff72915d3b1\": rpc error: code = NotFound desc = could not find container \"a83dd14881e1acebd7afd4b220367f999fba13e185dd2f8d63447ff72915d3b1\": container with ID starting with a83dd14881e1acebd7afd4b220367f999fba13e185dd2f8d63447ff72915d3b1 not found: ID does not exist" Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.806471 4751 scope.go:117] "RemoveContainer" containerID="0518fd500f9058115f678dd5acc1354c7429b8e80209acf42334c09f5b107692" Feb 27 17:01:39 crc kubenswrapper[4751]: E0227 17:01:39.806866 4751 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"0518fd500f9058115f678dd5acc1354c7429b8e80209acf42334c09f5b107692\": container with ID starting with 0518fd500f9058115f678dd5acc1354c7429b8e80209acf42334c09f5b107692 not found: ID does not exist" containerID="0518fd500f9058115f678dd5acc1354c7429b8e80209acf42334c09f5b107692" Feb 27 17:01:39 crc kubenswrapper[4751]: I0227 17:01:39.806892 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0518fd500f9058115f678dd5acc1354c7429b8e80209acf42334c09f5b107692"} err="failed to get container status \"0518fd500f9058115f678dd5acc1354c7429b8e80209acf42334c09f5b107692\": rpc error: code = NotFound desc = could not find container \"0518fd500f9058115f678dd5acc1354c7429b8e80209acf42334c09f5b107692\": container with ID starting with 0518fd500f9058115f678dd5acc1354c7429b8e80209acf42334c09f5b107692 not found: ID does not exist" Feb 27 17:01:40 crc kubenswrapper[4751]: I0227 17:01:40.532736 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6bdb1e46-a70e-4981-8ed2-0c9a5142c095" path="/var/lib/kubelet/pods/6bdb1e46-a70e-4981-8ed2-0c9a5142c095/volumes" Feb 27 17:01:58 crc kubenswrapper[4751]: I0227 17:01:58.918354 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:01:58 crc kubenswrapper[4751]: I0227 17:01:58.919108 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:01:58 crc kubenswrapper[4751]: I0227 17:01:58.919182 4751 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 17:01:58 crc kubenswrapper[4751]: I0227 17:01:58.920164 4751 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"df4b103e4664bc2214fc36d6fc974510e33084f406a93d660aee2115d6a78486"} pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 27 17:01:58 crc kubenswrapper[4751]: I0227 17:01:58.920264 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" containerID="cri-o://df4b103e4664bc2214fc36d6fc974510e33084f406a93d660aee2115d6a78486" gracePeriod=600 Feb 27 17:01:59 crc kubenswrapper[4751]: I0227 17:01:59.886783 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerDied","Data":"df4b103e4664bc2214fc36d6fc974510e33084f406a93d660aee2115d6a78486"} Feb 27 17:01:59 crc kubenswrapper[4751]: I0227 17:01:59.887847 4751 scope.go:117] "RemoveContainer" containerID="1ab1bfea98a991e859a70f849c8c1d763e7532191e9d62e2574f946c87993ff0" Feb 27 17:01:59 crc kubenswrapper[4751]: I0227 17:01:59.886807 4751 
generic.go:334] "Generic (PLEG): container finished" podID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerID="df4b103e4664bc2214fc36d6fc974510e33084f406a93d660aee2115d6a78486" exitCode=0 Feb 27 17:01:59 crc kubenswrapper[4751]: I0227 17:01:59.888582 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerStarted","Data":"3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407"} Feb 27 17:02:00 crc kubenswrapper[4751]: I0227 17:02:00.160584 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536862-k66j6"] Feb 27 17:02:00 crc kubenswrapper[4751]: E0227 17:02:00.161113 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bdb1e46-a70e-4981-8ed2-0c9a5142c095" containerName="extract-content" Feb 27 17:02:00 crc kubenswrapper[4751]: I0227 17:02:00.161135 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bdb1e46-a70e-4981-8ed2-0c9a5142c095" containerName="extract-content" Feb 27 17:02:00 crc kubenswrapper[4751]: E0227 17:02:00.161158 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bdb1e46-a70e-4981-8ed2-0c9a5142c095" containerName="registry-server" Feb 27 17:02:00 crc kubenswrapper[4751]: I0227 17:02:00.161197 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bdb1e46-a70e-4981-8ed2-0c9a5142c095" containerName="registry-server" Feb 27 17:02:00 crc kubenswrapper[4751]: E0227 17:02:00.161237 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bdb1e46-a70e-4981-8ed2-0c9a5142c095" containerName="extract-utilities" Feb 27 17:02:00 crc kubenswrapper[4751]: I0227 17:02:00.161250 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bdb1e46-a70e-4981-8ed2-0c9a5142c095" containerName="extract-utilities" Feb 27 17:02:00 crc kubenswrapper[4751]: I0227 17:02:00.161580 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="6bdb1e46-a70e-4981-8ed2-0c9a5142c095" containerName="registry-server" Feb 27 17:02:00 crc kubenswrapper[4751]: I0227 17:02:00.162370 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536862-k66j6" Feb 27 17:02:00 crc kubenswrapper[4751]: I0227 17:02:00.165561 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:02:00 crc kubenswrapper[4751]: I0227 17:02:00.165982 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:02:00 crc kubenswrapper[4751]: I0227 17:02:00.166149 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:02:00 crc kubenswrapper[4751]: I0227 17:02:00.176974 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536862-k66j6"] Feb 27 17:02:00 crc kubenswrapper[4751]: I0227 17:02:00.313736 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzbpp\" (UniqueName: \"kubernetes.io/projected/87d84eef-d1b9-4496-9a93-758b8719da36-kube-api-access-xzbpp\") pod \"auto-csr-approver-29536862-k66j6\" (UID: \"87d84eef-d1b9-4496-9a93-758b8719da36\") " pod="openshift-infra/auto-csr-approver-29536862-k66j6" Feb 27 17:02:00 crc kubenswrapper[4751]: I0227 17:02:00.415573 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzbpp\" (UniqueName: \"kubernetes.io/projected/87d84eef-d1b9-4496-9a93-758b8719da36-kube-api-access-xzbpp\") pod \"auto-csr-approver-29536862-k66j6\" (UID: \"87d84eef-d1b9-4496-9a93-758b8719da36\") " pod="openshift-infra/auto-csr-approver-29536862-k66j6" Feb 27 17:02:00 crc kubenswrapper[4751]: I0227 17:02:00.451439 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzbpp\" (UniqueName: \"kubernetes.io/projected/87d84eef-d1b9-4496-9a93-758b8719da36-kube-api-access-xzbpp\") pod \"auto-csr-approver-29536862-k66j6\" (UID: \"87d84eef-d1b9-4496-9a93-758b8719da36\") " pod="openshift-infra/auto-csr-approver-29536862-k66j6" Feb 27 17:02:00 crc kubenswrapper[4751]: I0227 17:02:00.500483 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536862-k66j6" Feb 27 17:02:01 crc kubenswrapper[4751]: I0227 17:02:01.005129 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536862-k66j6"] Feb 27 17:02:01 crc kubenswrapper[4751]: I0227 17:02:01.910092 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536862-k66j6" event={"ID":"87d84eef-d1b9-4496-9a93-758b8719da36","Type":"ContainerStarted","Data":"af7bfc24fc19149717b4c54873b6fd7facd1c5fd5bc729aa4c4e3dff7a15af86"} Feb 27 17:02:02 crc kubenswrapper[4751]: I0227 17:02:02.922382 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536862-k66j6" event={"ID":"87d84eef-d1b9-4496-9a93-758b8719da36","Type":"ContainerStarted","Data":"3ef0dda66111a89aa652b91ad74327fc1f11986918cfca5b2d065df55f71de29"} Feb 27 17:02:02 crc kubenswrapper[4751]: I0227 17:02:02.945480 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-infra/auto-csr-approver-29536862-k66j6" podStartSLOduration=1.6386578109999999 podStartE2EDuration="2.945452691s" podCreationTimestamp="2026-02-27 17:02:00 +0000 UTC" firstStartedPulling="2026-02-27 17:02:01.01790848 +0000 UTC m=+2283.164922947" lastFinishedPulling="2026-02-27 17:02:02.32470333 +0000 UTC m=+2284.471717827" observedRunningTime="2026-02-27 17:02:02.942083151 +0000 UTC m=+2285.089097618" watchObservedRunningTime="2026-02-27 17:02:02.945452691 +0000 UTC m=+2285.092467178" Feb 27 17:02:03 crc kubenswrapper[4751]: I0227 17:02:03.950148 4751 generic.go:334] "Generic (PLEG): container finished" podID="87d84eef-d1b9-4496-9a93-758b8719da36" containerID="3ef0dda66111a89aa652b91ad74327fc1f11986918cfca5b2d065df55f71de29" exitCode=0 Feb 27 17:02:03 crc kubenswrapper[4751]: I0227 17:02:03.950221 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536862-k66j6" event={"ID":"87d84eef-d1b9-4496-9a93-758b8719da36","Type":"ContainerDied","Data":"3ef0dda66111a89aa652b91ad74327fc1f11986918cfca5b2d065df55f71de29"} Feb 27 17:02:05 crc kubenswrapper[4751]: I0227 17:02:05.399263 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536862-k66j6" Feb 27 17:02:05 crc kubenswrapper[4751]: I0227 17:02:05.514784 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xzbpp\" (UniqueName: \"kubernetes.io/projected/87d84eef-d1b9-4496-9a93-758b8719da36-kube-api-access-xzbpp\") pod \"87d84eef-d1b9-4496-9a93-758b8719da36\" (UID: \"87d84eef-d1b9-4496-9a93-758b8719da36\") " Feb 27 17:02:05 crc kubenswrapper[4751]: I0227 17:02:05.520824 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87d84eef-d1b9-4496-9a93-758b8719da36-kube-api-access-xzbpp" (OuterVolumeSpecName: "kube-api-access-xzbpp") pod "87d84eef-d1b9-4496-9a93-758b8719da36" (UID: "87d84eef-d1b9-4496-9a93-758b8719da36"). InnerVolumeSpecName "kube-api-access-xzbpp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:02:05 crc kubenswrapper[4751]: I0227 17:02:05.617008 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xzbpp\" (UniqueName: \"kubernetes.io/projected/87d84eef-d1b9-4496-9a93-758b8719da36-kube-api-access-xzbpp\") on node \"crc\" DevicePath \"\"" Feb 27 17:02:05 crc kubenswrapper[4751]: I0227 17:02:05.998663 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536862-k66j6" event={"ID":"87d84eef-d1b9-4496-9a93-758b8719da36","Type":"ContainerDied","Data":"af7bfc24fc19149717b4c54873b6fd7facd1c5fd5bc729aa4c4e3dff7a15af86"} Feb 27 17:02:05 crc kubenswrapper[4751]: I0227 17:02:05.999255 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="af7bfc24fc19149717b4c54873b6fd7facd1c5fd5bc729aa4c4e3dff7a15af86" Feb 27 17:02:05 crc kubenswrapper[4751]: I0227 17:02:05.999371 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536862-k66j6" Feb 27 17:02:06 crc kubenswrapper[4751]: I0227 17:02:06.045802 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536856-db2sg"] Feb 27 17:02:06 crc kubenswrapper[4751]: I0227 17:02:06.056262 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536856-db2sg"] Feb 27 17:02:06 crc kubenswrapper[4751]: I0227 17:02:06.539056 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ebeadd22-44dc-445d-a739-41d6a85ca738" path="/var/lib/kubelet/pods/ebeadd22-44dc-445d-a739-41d6a85ca738/volumes" Feb 27 17:02:48 crc kubenswrapper[4751]: I0227 17:02:48.941632 4751 scope.go:117] "RemoveContainer" containerID="cecf25500477f87bf157cbdb2c8187cccaf59bab938caca58b44dfb2d58e4b56" Feb 27 17:04:00 crc kubenswrapper[4751]: I0227 17:04:00.154129 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536864-rwwt8"] Feb 27 17:04:00 crc kubenswrapper[4751]: E0227 17:04:00.155240 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87d84eef-d1b9-4496-9a93-758b8719da36" containerName="oc" Feb 27 17:04:00 crc kubenswrapper[4751]: I0227 17:04:00.155263 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="87d84eef-d1b9-4496-9a93-758b8719da36" containerName="oc" Feb 27 17:04:00 crc kubenswrapper[4751]: I0227 17:04:00.155622 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="87d84eef-d1b9-4496-9a93-758b8719da36" containerName="oc" Feb 27 17:04:00 crc kubenswrapper[4751]: I0227 17:04:00.156334 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536864-rwwt8" Feb 27 17:04:00 crc kubenswrapper[4751]: I0227 17:04:00.159727 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:04:00 crc kubenswrapper[4751]: I0227 17:04:00.160859 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:04:00 crc kubenswrapper[4751]: I0227 17:04:00.163872 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:04:00 crc kubenswrapper[4751]: I0227 17:04:00.167290 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536864-rwwt8"] Feb 27 17:04:00 crc kubenswrapper[4751]: I0227 17:04:00.283078 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4t99v\" (UniqueName: \"kubernetes.io/projected/4dc0281f-c3f5-4047-a1fd-a305228ed8a5-kube-api-access-4t99v\") pod \"auto-csr-approver-29536864-rwwt8\" (UID: \"4dc0281f-c3f5-4047-a1fd-a305228ed8a5\") " pod="openshift-infra/auto-csr-approver-29536864-rwwt8" Feb 27 17:04:00 crc kubenswrapper[4751]: I0227 17:04:00.385127 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4t99v\" (UniqueName: \"kubernetes.io/projected/4dc0281f-c3f5-4047-a1fd-a305228ed8a5-kube-api-access-4t99v\") pod \"auto-csr-approver-29536864-rwwt8\" (UID: \"4dc0281f-c3f5-4047-a1fd-a305228ed8a5\") " pod="openshift-infra/auto-csr-approver-29536864-rwwt8" Feb 27 17:04:00 crc kubenswrapper[4751]: I0227 17:04:00.431034 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4t99v\" (UniqueName: \"kubernetes.io/projected/4dc0281f-c3f5-4047-a1fd-a305228ed8a5-kube-api-access-4t99v\") pod \"auto-csr-approver-29536864-rwwt8\" (UID: \"4dc0281f-c3f5-4047-a1fd-a305228ed8a5\") " pod="openshift-infra/auto-csr-approver-29536864-rwwt8" Feb 27 17:04:00 crc kubenswrapper[4751]: I0227 17:04:00.493769 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536864-rwwt8" Feb 27 17:04:00 crc kubenswrapper[4751]: I0227 17:04:00.935858 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536864-rwwt8"] Feb 27 17:04:01 crc kubenswrapper[4751]: I0227 17:04:01.020680 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536864-rwwt8" event={"ID":"4dc0281f-c3f5-4047-a1fd-a305228ed8a5","Type":"ContainerStarted","Data":"6af30bb6d6417cf98fbd78c358a0543f83a0561e3bdca659e39876feff93b122"} Feb 27 17:04:03 crc kubenswrapper[4751]: I0227 17:04:03.043228 4751 generic.go:334] "Generic (PLEG): container finished" podID="4dc0281f-c3f5-4047-a1fd-a305228ed8a5" containerID="67d9136de76a65c594cc776684f377177c18c9bd02390971709d96245b681b23" exitCode=0 Feb 27 17:04:03 crc kubenswrapper[4751]: I0227 17:04:03.043347 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536864-rwwt8" event={"ID":"4dc0281f-c3f5-4047-a1fd-a305228ed8a5","Type":"ContainerDied","Data":"67d9136de76a65c594cc776684f377177c18c9bd02390971709d96245b681b23"} Feb 27 17:04:04 crc kubenswrapper[4751]: I0227 17:04:04.029109 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mxjgw"] Feb 27 17:04:04 crc kubenswrapper[4751]: I0227 17:04:04.030886 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mxjgw" Feb 27 17:04:04 crc kubenswrapper[4751]: I0227 17:04:04.051126 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mxjgw"] Feb 27 17:04:04 crc kubenswrapper[4751]: I0227 17:04:04.151775 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab96dffb-bdd9-4697-9a6c-84c0ac3267c2-utilities\") pod \"redhat-marketplace-mxjgw\" (UID: \"ab96dffb-bdd9-4697-9a6c-84c0ac3267c2\") " pod="openshift-marketplace/redhat-marketplace-mxjgw" Feb 27 17:04:04 crc kubenswrapper[4751]: I0227 17:04:04.151821 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab96dffb-bdd9-4697-9a6c-84c0ac3267c2-catalog-content\") pod \"redhat-marketplace-mxjgw\" (UID: \"ab96dffb-bdd9-4697-9a6c-84c0ac3267c2\") " pod="openshift-marketplace/redhat-marketplace-mxjgw" Feb 27 17:04:04 crc kubenswrapper[4751]: I0227 17:04:04.151865 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vw9d\" (UniqueName: \"kubernetes.io/projected/ab96dffb-bdd9-4697-9a6c-84c0ac3267c2-kube-api-access-9vw9d\") pod \"redhat-marketplace-mxjgw\" (UID: \"ab96dffb-bdd9-4697-9a6c-84c0ac3267c2\") " pod="openshift-marketplace/redhat-marketplace-mxjgw" Feb 27 17:04:04 crc kubenswrapper[4751]: I0227 17:04:04.253631 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab96dffb-bdd9-4697-9a6c-84c0ac3267c2-utilities\") pod \"redhat-marketplace-mxjgw\" (UID: \"ab96dffb-bdd9-4697-9a6c-84c0ac3267c2\") " pod="openshift-marketplace/redhat-marketplace-mxjgw" Feb 27 17:04:04 crc kubenswrapper[4751]: I0227 17:04:04.253675 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/ab96dffb-bdd9-4697-9a6c-84c0ac3267c2-catalog-content\") pod \"redhat-marketplace-mxjgw\" (UID: \"ab96dffb-bdd9-4697-9a6c-84c0ac3267c2\") " pod="openshift-marketplace/redhat-marketplace-mxjgw" Feb 27 17:04:04 crc kubenswrapper[4751]: I0227 17:04:04.253737 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vw9d\" (UniqueName: \"kubernetes.io/projected/ab96dffb-bdd9-4697-9a6c-84c0ac3267c2-kube-api-access-9vw9d\") pod \"redhat-marketplace-mxjgw\" (UID: \"ab96dffb-bdd9-4697-9a6c-84c0ac3267c2\") " pod="openshift-marketplace/redhat-marketplace-mxjgw" Feb 27 17:04:04 crc kubenswrapper[4751]: I0227 17:04:04.254590 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab96dffb-bdd9-4697-9a6c-84c0ac3267c2-utilities\") pod \"redhat-marketplace-mxjgw\" (UID: \"ab96dffb-bdd9-4697-9a6c-84c0ac3267c2\") " pod="openshift-marketplace/redhat-marketplace-mxjgw" Feb 27 17:04:04 crc kubenswrapper[4751]: I0227 17:04:04.254858 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab96dffb-bdd9-4697-9a6c-84c0ac3267c2-catalog-content\") pod \"redhat-marketplace-mxjgw\" (UID: \"ab96dffb-bdd9-4697-9a6c-84c0ac3267c2\") " pod="openshift-marketplace/redhat-marketplace-mxjgw" Feb 27 17:04:04 crc kubenswrapper[4751]: I0227 17:04:04.279924 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9vw9d\" (UniqueName: \"kubernetes.io/projected/ab96dffb-bdd9-4697-9a6c-84c0ac3267c2-kube-api-access-9vw9d\") pod \"redhat-marketplace-mxjgw\" (UID: \"ab96dffb-bdd9-4697-9a6c-84c0ac3267c2\") " pod="openshift-marketplace/redhat-marketplace-mxjgw" Feb 27 17:04:04 crc kubenswrapper[4751]: I0227 17:04:04.378209 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mxjgw" Feb 27 17:04:04 crc kubenswrapper[4751]: I0227 17:04:04.433917 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536864-rwwt8" Feb 27 17:04:04 crc kubenswrapper[4751]: I0227 17:04:04.558810 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4t99v\" (UniqueName: \"kubernetes.io/projected/4dc0281f-c3f5-4047-a1fd-a305228ed8a5-kube-api-access-4t99v\") pod \"4dc0281f-c3f5-4047-a1fd-a305228ed8a5\" (UID: \"4dc0281f-c3f5-4047-a1fd-a305228ed8a5\") " Feb 27 17:04:04 crc kubenswrapper[4751]: I0227 17:04:04.576654 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4dc0281f-c3f5-4047-a1fd-a305228ed8a5-kube-api-access-4t99v" (OuterVolumeSpecName: "kube-api-access-4t99v") pod "4dc0281f-c3f5-4047-a1fd-a305228ed8a5" (UID: "4dc0281f-c3f5-4047-a1fd-a305228ed8a5"). InnerVolumeSpecName "kube-api-access-4t99v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:04:04 crc kubenswrapper[4751]: I0227 17:04:04.655150 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mxjgw"] Feb 27 17:04:04 crc kubenswrapper[4751]: W0227 17:04:04.656937 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podab96dffb_bdd9_4697_9a6c_84c0ac3267c2.slice/crio-85ca80afb5e19839b1268d41243cb0a92ed1bace36742e19bda3b30759a72792 WatchSource:0}: Error finding container 85ca80afb5e19839b1268d41243cb0a92ed1bace36742e19bda3b30759a72792: Status 404 returned error can't find the container with id 85ca80afb5e19839b1268d41243cb0a92ed1bace36742e19bda3b30759a72792 Feb 27 17:04:04 crc kubenswrapper[4751]: I0227 17:04:04.660162 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4t99v\" (UniqueName: \"kubernetes.io/projected/4dc0281f-c3f5-4047-a1fd-a305228ed8a5-kube-api-access-4t99v\") on node \"crc\" DevicePath \"\"" Feb 27 17:04:05 crc kubenswrapper[4751]: I0227 17:04:05.064456 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536864-rwwt8" event={"ID":"4dc0281f-c3f5-4047-a1fd-a305228ed8a5","Type":"ContainerDied","Data":"6af30bb6d6417cf98fbd78c358a0543f83a0561e3bdca659e39876feff93b122"} Feb 27 17:04:05 crc kubenswrapper[4751]: I0227 17:04:05.064501 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536864-rwwt8" Feb 27 17:04:05 crc kubenswrapper[4751]: I0227 17:04:05.064534 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6af30bb6d6417cf98fbd78c358a0543f83a0561e3bdca659e39876feff93b122" Feb 27 17:04:05 crc kubenswrapper[4751]: I0227 17:04:05.072083 4751 generic.go:334] "Generic (PLEG): container finished" podID="ab96dffb-bdd9-4697-9a6c-84c0ac3267c2" containerID="dd8790e0993f8a8603e4af20997803d0da98b2c3baf66cc450daa0bf7ebcadd6" exitCode=0 Feb 27 17:04:05 crc kubenswrapper[4751]: I0227 17:04:05.072142 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mxjgw" event={"ID":"ab96dffb-bdd9-4697-9a6c-84c0ac3267c2","Type":"ContainerDied","Data":"dd8790e0993f8a8603e4af20997803d0da98b2c3baf66cc450daa0bf7ebcadd6"} Feb 27 17:04:05 crc kubenswrapper[4751]: I0227 17:04:05.072185 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mxjgw" event={"ID":"ab96dffb-bdd9-4697-9a6c-84c0ac3267c2","Type":"ContainerStarted","Data":"85ca80afb5e19839b1268d41243cb0a92ed1bace36742e19bda3b30759a72792"} Feb 27 17:04:05 crc kubenswrapper[4751]: I0227 17:04:05.522871 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536858-wjm4x"] Feb 27 17:04:05 crc kubenswrapper[4751]: I0227 17:04:05.532296 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536858-wjm4x"] Feb 27 17:04:06 crc kubenswrapper[4751]: I0227 17:04:06.088572 4751 generic.go:334] "Generic (PLEG): container finished" podID="ab96dffb-bdd9-4697-9a6c-84c0ac3267c2" containerID="4455f1949b7026770890a2940678268070b8dcc4e91a1bc9397e00cfd8ee5b2f" exitCode=0 Feb 27 17:04:06 crc kubenswrapper[4751]: I0227 17:04:06.088619 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mxjgw" 
event={"ID":"ab96dffb-bdd9-4697-9a6c-84c0ac3267c2","Type":"ContainerDied","Data":"4455f1949b7026770890a2940678268070b8dcc4e91a1bc9397e00cfd8ee5b2f"} Feb 27 17:04:06 crc kubenswrapper[4751]: I0227 17:04:06.531597 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="963b1d1a-a6ba-49e0-b459-e1069fc5df85" path="/var/lib/kubelet/pods/963b1d1a-a6ba-49e0-b459-e1069fc5df85/volumes" Feb 27 17:04:07 crc kubenswrapper[4751]: I0227 17:04:07.103059 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mxjgw" event={"ID":"ab96dffb-bdd9-4697-9a6c-84c0ac3267c2","Type":"ContainerStarted","Data":"c28f86fecba743970a506d5e3083045786c3a39ef552ba75c5ea617aefb1e425"} Feb 27 17:04:07 crc kubenswrapper[4751]: I0227 17:04:07.139045 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mxjgw" podStartSLOduration=2.70811551 podStartE2EDuration="4.139020706s" podCreationTimestamp="2026-02-27 17:04:03 +0000 UTC" firstStartedPulling="2026-02-27 17:04:05.077055347 +0000 UTC m=+2407.224069824" lastFinishedPulling="2026-02-27 17:04:06.507960533 +0000 UTC m=+2408.654975020" observedRunningTime="2026-02-27 17:04:07.135430691 +0000 UTC m=+2409.282445168" watchObservedRunningTime="2026-02-27 17:04:07.139020706 +0000 UTC m=+2409.286035183" Feb 27 17:04:14 crc kubenswrapper[4751]: I0227 17:04:14.378442 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mxjgw" Feb 27 17:04:14 crc kubenswrapper[4751]: I0227 17:04:14.379155 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mxjgw" Feb 27 17:04:14 crc kubenswrapper[4751]: I0227 17:04:14.446443 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mxjgw" Feb 27 17:04:15 crc kubenswrapper[4751]: I0227 17:04:15.236047 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mxjgw" Feb 27 17:04:15 crc kubenswrapper[4751]: I0227 17:04:15.297447 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mxjgw"] Feb 27 17:04:17 crc kubenswrapper[4751]: I0227 17:04:17.185720 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mxjgw" podUID="ab96dffb-bdd9-4697-9a6c-84c0ac3267c2" containerName="registry-server" containerID="cri-o://c28f86fecba743970a506d5e3083045786c3a39ef552ba75c5ea617aefb1e425" gracePeriod=2 Feb 27 17:04:17 crc kubenswrapper[4751]: I0227 17:04:17.674531 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mxjgw" Feb 27 17:04:17 crc kubenswrapper[4751]: I0227 17:04:17.763810 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9vw9d\" (UniqueName: \"kubernetes.io/projected/ab96dffb-bdd9-4697-9a6c-84c0ac3267c2-kube-api-access-9vw9d\") pod \"ab96dffb-bdd9-4697-9a6c-84c0ac3267c2\" (UID: \"ab96dffb-bdd9-4697-9a6c-84c0ac3267c2\") " Feb 27 17:04:17 crc kubenswrapper[4751]: I0227 17:04:17.763966 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab96dffb-bdd9-4697-9a6c-84c0ac3267c2-utilities\") pod \"ab96dffb-bdd9-4697-9a6c-84c0ac3267c2\" (UID: \"ab96dffb-bdd9-4697-9a6c-84c0ac3267c2\") " Feb 27 17:04:17 crc kubenswrapper[4751]: I0227 17:04:17.764011 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab96dffb-bdd9-4697-9a6c-84c0ac3267c2-catalog-content\") pod \"ab96dffb-bdd9-4697-9a6c-84c0ac3267c2\" (UID: \"ab96dffb-bdd9-4697-9a6c-84c0ac3267c2\") " Feb 27 17:04:17 crc kubenswrapper[4751]: I0227 17:04:17.767711 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab96dffb-bdd9-4697-9a6c-84c0ac3267c2-utilities" (OuterVolumeSpecName: "utilities") pod "ab96dffb-bdd9-4697-9a6c-84c0ac3267c2" (UID: "ab96dffb-bdd9-4697-9a6c-84c0ac3267c2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:04:17 crc kubenswrapper[4751]: I0227 17:04:17.769281 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab96dffb-bdd9-4697-9a6c-84c0ac3267c2-kube-api-access-9vw9d" (OuterVolumeSpecName: "kube-api-access-9vw9d") pod "ab96dffb-bdd9-4697-9a6c-84c0ac3267c2" (UID: "ab96dffb-bdd9-4697-9a6c-84c0ac3267c2"). InnerVolumeSpecName "kube-api-access-9vw9d". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:04:17 crc kubenswrapper[4751]: I0227 17:04:17.802705 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab96dffb-bdd9-4697-9a6c-84c0ac3267c2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ab96dffb-bdd9-4697-9a6c-84c0ac3267c2" (UID: "ab96dffb-bdd9-4697-9a6c-84c0ac3267c2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:04:17 crc kubenswrapper[4751]: I0227 17:04:17.865256 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab96dffb-bdd9-4697-9a6c-84c0ac3267c2-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 17:04:17 crc kubenswrapper[4751]: I0227 17:04:17.865318 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab96dffb-bdd9-4697-9a6c-84c0ac3267c2-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 17:04:17 crc kubenswrapper[4751]: I0227 17:04:17.865328 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9vw9d\" (UniqueName: \"kubernetes.io/projected/ab96dffb-bdd9-4697-9a6c-84c0ac3267c2-kube-api-access-9vw9d\") on node \"crc\" DevicePath \"\"" Feb 27 17:04:18 crc kubenswrapper[4751]: I0227 17:04:18.199094 4751 generic.go:334] "Generic (PLEG): container finished" podID="ab96dffb-bdd9-4697-9a6c-84c0ac3267c2" containerID="c28f86fecba743970a506d5e3083045786c3a39ef552ba75c5ea617aefb1e425" exitCode=0 Feb 27 17:04:18 crc kubenswrapper[4751]: I0227 17:04:18.199204 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mxjgw" event={"ID":"ab96dffb-bdd9-4697-9a6c-84c0ac3267c2","Type":"ContainerDied","Data":"c28f86fecba743970a506d5e3083045786c3a39ef552ba75c5ea617aefb1e425"} Feb 27 17:04:18 crc kubenswrapper[4751]: I0227 17:04:18.199294 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mxjgw" event={"ID":"ab96dffb-bdd9-4697-9a6c-84c0ac3267c2","Type":"ContainerDied","Data":"85ca80afb5e19839b1268d41243cb0a92ed1bace36742e19bda3b30759a72792"} Feb 27 17:04:18 crc kubenswrapper[4751]: I0227 17:04:18.199368 4751 scope.go:117] "RemoveContainer" containerID="c28f86fecba743970a506d5e3083045786c3a39ef552ba75c5ea617aefb1e425" Feb 27 17:04:18 crc kubenswrapper[4751]: I0227 17:04:18.199783 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mxjgw" Feb 27 17:04:18 crc kubenswrapper[4751]: I0227 17:04:18.226386 4751 scope.go:117] "RemoveContainer" containerID="4455f1949b7026770890a2940678268070b8dcc4e91a1bc9397e00cfd8ee5b2f" Feb 27 17:04:18 crc kubenswrapper[4751]: I0227 17:04:18.268533 4751 scope.go:117] "RemoveContainer" containerID="dd8790e0993f8a8603e4af20997803d0da98b2c3baf66cc450daa0bf7ebcadd6" Feb 27 17:04:18 crc kubenswrapper[4751]: I0227 17:04:18.272348 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mxjgw"] Feb 27 17:04:18 crc kubenswrapper[4751]: I0227 17:04:18.302653 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mxjgw"] Feb 27 17:04:18 crc kubenswrapper[4751]: I0227 17:04:18.318596 4751 scope.go:117] "RemoveContainer" containerID="c28f86fecba743970a506d5e3083045786c3a39ef552ba75c5ea617aefb1e425" Feb 27 17:04:18 crc kubenswrapper[4751]: E0227 17:04:18.322538 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c28f86fecba743970a506d5e3083045786c3a39ef552ba75c5ea617aefb1e425\": container with ID starting with c28f86fecba743970a506d5e3083045786c3a39ef552ba75c5ea617aefb1e425 not found: ID does not exist" containerID="c28f86fecba743970a506d5e3083045786c3a39ef552ba75c5ea617aefb1e425" Feb 27 17:04:18 crc kubenswrapper[4751]: I0227 17:04:18.322745 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c28f86fecba743970a506d5e3083045786c3a39ef552ba75c5ea617aefb1e425"} err="failed to get container status \"c28f86fecba743970a506d5e3083045786c3a39ef552ba75c5ea617aefb1e425\": rpc error: code = NotFound desc = could not find container \"c28f86fecba743970a506d5e3083045786c3a39ef552ba75c5ea617aefb1e425\": container with ID starting with c28f86fecba743970a506d5e3083045786c3a39ef552ba75c5ea617aefb1e425 not found: ID does not exist" Feb 27 17:04:18 crc kubenswrapper[4751]: I0227 17:04:18.322823 4751 scope.go:117] "RemoveContainer" containerID="4455f1949b7026770890a2940678268070b8dcc4e91a1bc9397e00cfd8ee5b2f" Feb 27 17:04:18 crc kubenswrapper[4751]: E0227 17:04:18.332548 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4455f1949b7026770890a2940678268070b8dcc4e91a1bc9397e00cfd8ee5b2f\": container with ID starting with 4455f1949b7026770890a2940678268070b8dcc4e91a1bc9397e00cfd8ee5b2f not found: ID does not exist" containerID="4455f1949b7026770890a2940678268070b8dcc4e91a1bc9397e00cfd8ee5b2f" Feb 27 17:04:18 crc kubenswrapper[4751]: I0227 17:04:18.332781 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4455f1949b7026770890a2940678268070b8dcc4e91a1bc9397e00cfd8ee5b2f"} err="failed to get container status \"4455f1949b7026770890a2940678268070b8dcc4e91a1bc9397e00cfd8ee5b2f\": rpc error: code = NotFound desc = could not find container \"4455f1949b7026770890a2940678268070b8dcc4e91a1bc9397e00cfd8ee5b2f\": container with ID starting with 4455f1949b7026770890a2940678268070b8dcc4e91a1bc9397e00cfd8ee5b2f not found: ID does not exist" Feb 27 17:04:18 crc kubenswrapper[4751]: I0227 17:04:18.332860 4751 scope.go:117] "RemoveContainer" containerID="dd8790e0993f8a8603e4af20997803d0da98b2c3baf66cc450daa0bf7ebcadd6" Feb 27 17:04:18 crc kubenswrapper[4751]: E0227 17:04:18.336507 4751 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"dd8790e0993f8a8603e4af20997803d0da98b2c3baf66cc450daa0bf7ebcadd6\": container with ID starting with dd8790e0993f8a8603e4af20997803d0da98b2c3baf66cc450daa0bf7ebcadd6 not found: ID does not exist" containerID="dd8790e0993f8a8603e4af20997803d0da98b2c3baf66cc450daa0bf7ebcadd6" Feb 27 17:04:18 crc kubenswrapper[4751]: I0227 17:04:18.336663 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd8790e0993f8a8603e4af20997803d0da98b2c3baf66cc450daa0bf7ebcadd6"} err="failed to get container status \"dd8790e0993f8a8603e4af20997803d0da98b2c3baf66cc450daa0bf7ebcadd6\": rpc error: code = NotFound desc = could not find container \"dd8790e0993f8a8603e4af20997803d0da98b2c3baf66cc450daa0bf7ebcadd6\": container with ID starting with dd8790e0993f8a8603e4af20997803d0da98b2c3baf66cc450daa0bf7ebcadd6 not found: ID does not exist" Feb 27 17:04:18 crc kubenswrapper[4751]: I0227 17:04:18.530234 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab96dffb-bdd9-4697-9a6c-84c0ac3267c2" path="/var/lib/kubelet/pods/ab96dffb-bdd9-4697-9a6c-84c0ac3267c2/volumes" Feb 27 17:04:28 crc kubenswrapper[4751]: I0227 17:04:28.918914 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:04:28 crc kubenswrapper[4751]: I0227 17:04:28.919294 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:04:49 crc kubenswrapper[4751]: I0227 17:04:49.039772 4751 scope.go:117] "RemoveContainer" containerID="cb31a36defa79267ab510bc7b5af87bf5f72f5b361526e138b371c03160885fb" Feb 27 17:04:58 crc kubenswrapper[4751]: I0227 17:04:58.919539 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:04:58 crc kubenswrapper[4751]: I0227 17:04:58.920276 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:05:28 crc kubenswrapper[4751]: I0227 17:05:28.919114 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:05:28 crc kubenswrapper[4751]: I0227 17:05:28.919843 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:05:28 crc kubenswrapper[4751]: I0227 17:05:28.919906 4751 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 17:05:28 crc kubenswrapper[4751]: I0227 17:05:28.920694 4751 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407"} pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 27 17:05:28 crc kubenswrapper[4751]: I0227 17:05:28.920793 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" containerID="cri-o://3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" gracePeriod=600 Feb 27 17:05:29 crc kubenswrapper[4751]: E0227 17:05:29.053012 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:05:29 crc kubenswrapper[4751]: I0227 17:05:29.853705 4751 generic.go:334] "Generic (PLEG): container finished" podID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" exitCode=0 Feb 27 17:05:29 crc kubenswrapper[4751]: I0227 17:05:29.853800 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerDied","Data":"3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407"} Feb 27 17:05:29 crc kubenswrapper[4751]: I0227 17:05:29.855616 4751 scope.go:117] "RemoveContainer" containerID="df4b103e4664bc2214fc36d6fc974510e33084f406a93d660aee2115d6a78486" Feb 27 17:05:29 crc kubenswrapper[4751]: I0227 17:05:29.856936 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:05:29 crc kubenswrapper[4751]: E0227 17:05:29.857386 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:05:40 crc kubenswrapper[4751]: I0227 17:05:40.521801 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:05:40 crc kubenswrapper[4751]: E0227 17:05:40.523099 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:05:54 crc kubenswrapper[4751]: I0227 17:05:54.520661 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:05:54 crc kubenswrapper[4751]: E0227 17:05:54.521695 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:06:00 crc kubenswrapper[4751]: I0227 17:06:00.161375 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536866-7nrxf"] Feb 27 17:06:00 crc kubenswrapper[4751]: E0227 17:06:00.162255 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab96dffb-bdd9-4697-9a6c-84c0ac3267c2" containerName="extract-utilities" Feb 27 17:06:00 crc kubenswrapper[4751]: I0227 17:06:00.162271 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab96dffb-bdd9-4697-9a6c-84c0ac3267c2" containerName="extract-utilities" Feb 27 17:06:00 crc kubenswrapper[4751]: E0227 17:06:00.162295 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab96dffb-bdd9-4697-9a6c-84c0ac3267c2" containerName="extract-content" Feb 27 17:06:00 crc kubenswrapper[4751]: I0227 17:06:00.162303 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab96dffb-bdd9-4697-9a6c-84c0ac3267c2" containerName="extract-content" Feb 27 17:06:00 crc kubenswrapper[4751]: E0227 17:06:00.162327 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dc0281f-c3f5-4047-a1fd-a305228ed8a5" containerName="oc" Feb 27 17:06:00 crc kubenswrapper[4751]: I0227 17:06:00.162334 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dc0281f-c3f5-4047-a1fd-a305228ed8a5" containerName="oc" Feb 27 17:06:00 crc kubenswrapper[4751]: E0227 17:06:00.162346 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab96dffb-bdd9-4697-9a6c-84c0ac3267c2" containerName="registry-server" Feb 27 17:06:00 crc kubenswrapper[4751]: I0227 17:06:00.162353 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab96dffb-bdd9-4697-9a6c-84c0ac3267c2" containerName="registry-server" Feb 27 17:06:00 crc kubenswrapper[4751]: I0227 17:06:00.162678 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab96dffb-bdd9-4697-9a6c-84c0ac3267c2" containerName="registry-server" Feb 27 17:06:00 crc kubenswrapper[4751]: I0227 17:06:00.162703 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="4dc0281f-c3f5-4047-a1fd-a305228ed8a5" containerName="oc" Feb 27 17:06:00 crc kubenswrapper[4751]: I0227 17:06:00.163337 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536866-7nrxf" Feb 27 17:06:00 crc kubenswrapper[4751]: I0227 17:06:00.166752 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:06:00 crc kubenswrapper[4751]: I0227 17:06:00.167149 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:06:00 crc kubenswrapper[4751]: I0227 17:06:00.167349 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:06:00 crc kubenswrapper[4751]: I0227 17:06:00.181797 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536866-7nrxf"] Feb 27 17:06:00 crc kubenswrapper[4751]: I0227 17:06:00.350262 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbx7c\" (UniqueName: \"kubernetes.io/projected/00cd8d48-bc37-44e1-9bfa-ea811897a782-kube-api-access-zbx7c\") pod \"auto-csr-approver-29536866-7nrxf\" (UID: \"00cd8d48-bc37-44e1-9bfa-ea811897a782\") " pod="openshift-infra/auto-csr-approver-29536866-7nrxf" Feb 27 17:06:00 crc kubenswrapper[4751]: I0227 17:06:00.451963 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbx7c\" (UniqueName: \"kubernetes.io/projected/00cd8d48-bc37-44e1-9bfa-ea811897a782-kube-api-access-zbx7c\") pod \"auto-csr-approver-29536866-7nrxf\" (UID: \"00cd8d48-bc37-44e1-9bfa-ea811897a782\") " pod="openshift-infra/auto-csr-approver-29536866-7nrxf" Feb 27 17:06:00 crc kubenswrapper[4751]: I0227 17:06:00.487504 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbx7c\" (UniqueName: \"kubernetes.io/projected/00cd8d48-bc37-44e1-9bfa-ea811897a782-kube-api-access-zbx7c\") pod \"auto-csr-approver-29536866-7nrxf\" (UID: \"00cd8d48-bc37-44e1-9bfa-ea811897a782\") " pod="openshift-infra/auto-csr-approver-29536866-7nrxf" Feb 27 17:06:00 crc kubenswrapper[4751]: I0227 17:06:00.497348 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536866-7nrxf" Feb 27 17:06:00 crc kubenswrapper[4751]: I0227 17:06:00.791989 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536866-7nrxf"] Feb 27 17:06:01 crc kubenswrapper[4751]: I0227 17:06:01.117434 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536866-7nrxf" event={"ID":"00cd8d48-bc37-44e1-9bfa-ea811897a782","Type":"ContainerStarted","Data":"8078651ad095086990d50299da7e5f850725c9221b40f4efdb00cb3a28610d4d"} Feb 27 17:06:04 crc kubenswrapper[4751]: I0227 17:06:04.149880 4751 generic.go:334] "Generic (PLEG): container finished" podID="00cd8d48-bc37-44e1-9bfa-ea811897a782" containerID="670822a51a004e03cfcde757360418fbe21c14652f762f1f891585ce42ae1a42" exitCode=0 Feb 27 17:06:04 crc kubenswrapper[4751]: I0227 17:06:04.150098 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536866-7nrxf" event={"ID":"00cd8d48-bc37-44e1-9bfa-ea811897a782","Type":"ContainerDied","Data":"670822a51a004e03cfcde757360418fbe21c14652f762f1f891585ce42ae1a42"} Feb 27 17:06:05 crc kubenswrapper[4751]: I0227 17:06:05.511485 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536866-7nrxf" Feb 27 17:06:05 crc kubenswrapper[4751]: I0227 17:06:05.520917 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:06:05 crc kubenswrapper[4751]: E0227 17:06:05.521167 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:06:05 crc kubenswrapper[4751]: I0227 17:06:05.658599 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zbx7c\" (UniqueName: \"kubernetes.io/projected/00cd8d48-bc37-44e1-9bfa-ea811897a782-kube-api-access-zbx7c\") pod \"00cd8d48-bc37-44e1-9bfa-ea811897a782\" (UID: \"00cd8d48-bc37-44e1-9bfa-ea811897a782\") " Feb 27 17:06:05 crc kubenswrapper[4751]: I0227 17:06:05.666738 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00cd8d48-bc37-44e1-9bfa-ea811897a782-kube-api-access-zbx7c" (OuterVolumeSpecName: "kube-api-access-zbx7c") pod "00cd8d48-bc37-44e1-9bfa-ea811897a782" (UID: "00cd8d48-bc37-44e1-9bfa-ea811897a782"). InnerVolumeSpecName "kube-api-access-zbx7c". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:06:05 crc kubenswrapper[4751]: I0227 17:06:05.760986 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zbx7c\" (UniqueName: \"kubernetes.io/projected/00cd8d48-bc37-44e1-9bfa-ea811897a782-kube-api-access-zbx7c\") on node \"crc\" DevicePath \"\"" Feb 27 17:06:06 crc kubenswrapper[4751]: I0227 17:06:06.173387 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536866-7nrxf" event={"ID":"00cd8d48-bc37-44e1-9bfa-ea811897a782","Type":"ContainerDied","Data":"8078651ad095086990d50299da7e5f850725c9221b40f4efdb00cb3a28610d4d"} Feb 27 17:06:06 crc kubenswrapper[4751]: I0227 17:06:06.173492 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8078651ad095086990d50299da7e5f850725c9221b40f4efdb00cb3a28610d4d" Feb 27 17:06:06 crc kubenswrapper[4751]: I0227 17:06:06.173504 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536866-7nrxf" Feb 27 17:06:06 crc kubenswrapper[4751]: I0227 17:06:06.612026 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536860-xnp4r"] Feb 27 17:06:06 crc kubenswrapper[4751]: I0227 17:06:06.625105 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536860-xnp4r"] Feb 27 17:06:08 crc kubenswrapper[4751]: I0227 17:06:08.537136 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30714fd1-3839-4710-ba0e-1f5325b60efb" path="/var/lib/kubelet/pods/30714fd1-3839-4710-ba0e-1f5325b60efb/volumes" Feb 27 17:06:19 crc kubenswrapper[4751]: I0227 17:06:19.520714 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:06:19 crc kubenswrapper[4751]: E0227 17:06:19.522023 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:06:31 crc kubenswrapper[4751]: I0227 17:06:31.521148 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:06:31 crc kubenswrapper[4751]: E0227 17:06:31.523171 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:06:45 crc kubenswrapper[4751]: I0227 17:06:45.521193 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:06:45 crc kubenswrapper[4751]: E0227 17:06:45.522171 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:06:49 crc kubenswrapper[4751]: I0227 17:06:49.154127 4751 scope.go:117] "RemoveContainer" containerID="ac1de1c579943b9ab36bd29820c25a775206ba68b080001b6fea99caa02dbc31" Feb 27 17:06:49 crc kubenswrapper[4751]: I0227 17:06:49.733951 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-sstvr"] Feb 27 17:06:49 crc kubenswrapper[4751]: E0227 17:06:49.734468 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00cd8d48-bc37-44e1-9bfa-ea811897a782" containerName="oc" Feb 27 17:06:49 crc kubenswrapper[4751]: I0227 17:06:49.734492 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="00cd8d48-bc37-44e1-9bfa-ea811897a782" containerName="oc" Feb 27 17:06:49 crc kubenswrapper[4751]: I0227 17:06:49.734764 4751 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="00cd8d48-bc37-44e1-9bfa-ea811897a782" containerName="oc" Feb 27 17:06:49 crc kubenswrapper[4751]: I0227 17:06:49.736546 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sstvr" Feb 27 17:06:49 crc kubenswrapper[4751]: I0227 17:06:49.754757 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sstvr"] Feb 27 17:06:49 crc kubenswrapper[4751]: I0227 17:06:49.834712 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhg2n\" (UniqueName: \"kubernetes.io/projected/3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0-kube-api-access-fhg2n\") pod \"community-operators-sstvr\" (UID: \"3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0\") " pod="openshift-marketplace/community-operators-sstvr" Feb 27 17:06:49 crc kubenswrapper[4751]: I0227 17:06:49.834775 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0-utilities\") pod \"community-operators-sstvr\" (UID: \"3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0\") " pod="openshift-marketplace/community-operators-sstvr" Feb 27 17:06:49 crc kubenswrapper[4751]: I0227 17:06:49.835326 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0-catalog-content\") pod \"community-operators-sstvr\" (UID: \"3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0\") " pod="openshift-marketplace/community-operators-sstvr" Feb 27 17:06:49 crc kubenswrapper[4751]: I0227 17:06:49.936545 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhg2n\" (UniqueName: \"kubernetes.io/projected/3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0-kube-api-access-fhg2n\") pod \"community-operators-sstvr\" (UID: \"3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0\") " pod="openshift-marketplace/community-operators-sstvr" Feb 27 17:06:49 crc kubenswrapper[4751]: I0227 17:06:49.936607 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0-utilities\") pod \"community-operators-sstvr\" (UID: \"3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0\") " pod="openshift-marketplace/community-operators-sstvr" Feb 27 17:06:49 crc kubenswrapper[4751]: I0227 17:06:49.936637 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0-catalog-content\") pod \"community-operators-sstvr\" (UID: \"3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0\") " pod="openshift-marketplace/community-operators-sstvr" Feb 27 17:06:49 crc kubenswrapper[4751]: I0227 17:06:49.937115 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0-catalog-content\") pod \"community-operators-sstvr\" (UID: \"3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0\") " pod="openshift-marketplace/community-operators-sstvr" Feb 27 17:06:49 crc kubenswrapper[4751]: I0227 17:06:49.937246 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0-utilities\") pod \"community-operators-sstvr\" (UID: 
\"3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0\") " pod="openshift-marketplace/community-operators-sstvr" Feb 27 17:06:49 crc kubenswrapper[4751]: I0227 17:06:49.962099 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhg2n\" (UniqueName: \"kubernetes.io/projected/3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0-kube-api-access-fhg2n\") pod \"community-operators-sstvr\" (UID: \"3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0\") " pod="openshift-marketplace/community-operators-sstvr" Feb 27 17:06:50 crc kubenswrapper[4751]: I0227 17:06:50.087163 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sstvr" Feb 27 17:06:50 crc kubenswrapper[4751]: I0227 17:06:50.581387 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sstvr"] Feb 27 17:06:51 crc kubenswrapper[4751]: I0227 17:06:51.601920 4751 generic.go:334] "Generic (PLEG): container finished" podID="3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0" containerID="531a0ab321d0763ffaeacb999f0e48ffb5ff59ff82a008ebcd18388a078ae5c5" exitCode=0 Feb 27 17:06:51 crc kubenswrapper[4751]: I0227 17:06:51.601989 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sstvr" event={"ID":"3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0","Type":"ContainerDied","Data":"531a0ab321d0763ffaeacb999f0e48ffb5ff59ff82a008ebcd18388a078ae5c5"} Feb 27 17:06:51 crc kubenswrapper[4751]: I0227 17:06:51.602456 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sstvr" event={"ID":"3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0","Type":"ContainerStarted","Data":"a59adcdddaad0d84b484d2a5c558857d10369680e4dd19a8979425d413e48d7d"} Feb 27 17:06:51 crc kubenswrapper[4751]: I0227 17:06:51.605187 4751 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 27 17:06:52 crc kubenswrapper[4751]: I0227 17:06:52.615249 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sstvr" event={"ID":"3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0","Type":"ContainerStarted","Data":"d7301473b04e966f96dfb55d6903f269bd88d94bd3416a0a8aa313600a08b78c"} Feb 27 17:06:53 crc kubenswrapper[4751]: I0227 17:06:53.630425 4751 generic.go:334] "Generic (PLEG): container finished" podID="3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0" containerID="d7301473b04e966f96dfb55d6903f269bd88d94bd3416a0a8aa313600a08b78c" exitCode=0 Feb 27 17:06:53 crc kubenswrapper[4751]: I0227 17:06:53.631286 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sstvr" event={"ID":"3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0","Type":"ContainerDied","Data":"d7301473b04e966f96dfb55d6903f269bd88d94bd3416a0a8aa313600a08b78c"} Feb 27 17:06:54 crc kubenswrapper[4751]: I0227 17:06:54.644959 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sstvr" event={"ID":"3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0","Type":"ContainerStarted","Data":"6f5a834f996c95efbd07b16b417b4b8b09f6be48e608ac902e789c46da96fbf1"} Feb 27 17:06:54 crc kubenswrapper[4751]: I0227 17:06:54.673499 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-sstvr" podStartSLOduration=2.960757183 podStartE2EDuration="5.673462155s" podCreationTimestamp="2026-02-27 17:06:49 +0000 UTC" firstStartedPulling="2026-02-27 17:06:51.604769912 +0000 UTC m=+2573.751784389" 
lastFinishedPulling="2026-02-27 17:06:54.317474904 +0000 UTC m=+2576.464489361" observedRunningTime="2026-02-27 17:06:54.672806058 +0000 UTC m=+2576.819820545" watchObservedRunningTime="2026-02-27 17:06:54.673462155 +0000 UTC m=+2576.820476652" Feb 27 17:06:56 crc kubenswrapper[4751]: I0227 17:06:56.521121 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:06:56 crc kubenswrapper[4751]: E0227 17:06:56.521690 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:07:00 crc kubenswrapper[4751]: I0227 17:07:00.087341 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-sstvr" Feb 27 17:07:00 crc kubenswrapper[4751]: I0227 17:07:00.088302 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-sstvr" Feb 27 17:07:00 crc kubenswrapper[4751]: I0227 17:07:00.163946 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-sstvr" Feb 27 17:07:00 crc kubenswrapper[4751]: I0227 17:07:00.779757 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-sstvr" Feb 27 17:07:00 crc kubenswrapper[4751]: I0227 17:07:00.856762 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sstvr"] Feb 27 17:07:02 crc kubenswrapper[4751]: I0227 17:07:02.725997 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-sstvr" podUID="3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0" containerName="registry-server" containerID="cri-o://6f5a834f996c95efbd07b16b417b4b8b09f6be48e608ac902e789c46da96fbf1" gracePeriod=2 Feb 27 17:07:03 crc kubenswrapper[4751]: I0227 17:07:03.236388 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-sstvr" Feb 27 17:07:03 crc kubenswrapper[4751]: I0227 17:07:03.368568 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0-catalog-content\") pod \"3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0\" (UID: \"3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0\") " Feb 27 17:07:03 crc kubenswrapper[4751]: I0227 17:07:03.368673 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fhg2n\" (UniqueName: \"kubernetes.io/projected/3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0-kube-api-access-fhg2n\") pod \"3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0\" (UID: \"3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0\") " Feb 27 17:07:03 crc kubenswrapper[4751]: I0227 17:07:03.368825 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0-utilities\") pod \"3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0\" (UID: \"3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0\") " Feb 27 17:07:03 crc kubenswrapper[4751]: I0227 17:07:03.370012 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0-utilities" (OuterVolumeSpecName: "utilities") pod "3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0" (UID: "3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:07:03 crc kubenswrapper[4751]: I0227 17:07:03.377856 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0-kube-api-access-fhg2n" (OuterVolumeSpecName: "kube-api-access-fhg2n") pod "3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0" (UID: "3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0"). InnerVolumeSpecName "kube-api-access-fhg2n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:07:03 crc kubenswrapper[4751]: I0227 17:07:03.471227 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 17:07:03 crc kubenswrapper[4751]: I0227 17:07:03.471284 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fhg2n\" (UniqueName: \"kubernetes.io/projected/3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0-kube-api-access-fhg2n\") on node \"crc\" DevicePath \"\"" Feb 27 17:07:03 crc kubenswrapper[4751]: I0227 17:07:03.738338 4751 generic.go:334] "Generic (PLEG): container finished" podID="3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0" containerID="6f5a834f996c95efbd07b16b417b4b8b09f6be48e608ac902e789c46da96fbf1" exitCode=0 Feb 27 17:07:03 crc kubenswrapper[4751]: I0227 17:07:03.738492 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sstvr" event={"ID":"3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0","Type":"ContainerDied","Data":"6f5a834f996c95efbd07b16b417b4b8b09f6be48e608ac902e789c46da96fbf1"} Feb 27 17:07:03 crc kubenswrapper[4751]: I0227 17:07:03.738959 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sstvr" event={"ID":"3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0","Type":"ContainerDied","Data":"a59adcdddaad0d84b484d2a5c558857d10369680e4dd19a8979425d413e48d7d"} Feb 27 17:07:03 crc kubenswrapper[4751]: I0227 17:07:03.738560 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sstvr" Feb 27 17:07:03 crc kubenswrapper[4751]: I0227 17:07:03.738998 4751 scope.go:117] "RemoveContainer" containerID="6f5a834f996c95efbd07b16b417b4b8b09f6be48e608ac902e789c46da96fbf1" Feb 27 17:07:03 crc kubenswrapper[4751]: I0227 17:07:03.762121 4751 scope.go:117] "RemoveContainer" containerID="d7301473b04e966f96dfb55d6903f269bd88d94bd3416a0a8aa313600a08b78c" Feb 27 17:07:03 crc kubenswrapper[4751]: I0227 17:07:03.784317 4751 scope.go:117] "RemoveContainer" containerID="531a0ab321d0763ffaeacb999f0e48ffb5ff59ff82a008ebcd18388a078ae5c5" Feb 27 17:07:03 crc kubenswrapper[4751]: I0227 17:07:03.805175 4751 scope.go:117] "RemoveContainer" containerID="6f5a834f996c95efbd07b16b417b4b8b09f6be48e608ac902e789c46da96fbf1" Feb 27 17:07:03 crc kubenswrapper[4751]: E0227 17:07:03.805957 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f5a834f996c95efbd07b16b417b4b8b09f6be48e608ac902e789c46da96fbf1\": container with ID starting with 6f5a834f996c95efbd07b16b417b4b8b09f6be48e608ac902e789c46da96fbf1 not found: ID does not exist" containerID="6f5a834f996c95efbd07b16b417b4b8b09f6be48e608ac902e789c46da96fbf1" Feb 27 17:07:03 crc kubenswrapper[4751]: I0227 17:07:03.806058 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f5a834f996c95efbd07b16b417b4b8b09f6be48e608ac902e789c46da96fbf1"} err="failed to get container status \"6f5a834f996c95efbd07b16b417b4b8b09f6be48e608ac902e789c46da96fbf1\": rpc error: code = NotFound desc = could not find container \"6f5a834f996c95efbd07b16b417b4b8b09f6be48e608ac902e789c46da96fbf1\": container with ID starting with 6f5a834f996c95efbd07b16b417b4b8b09f6be48e608ac902e789c46da96fbf1 not found: ID does not exist" Feb 27 17:07:03 crc kubenswrapper[4751]: I0227 17:07:03.806093 4751 scope.go:117] 
"RemoveContainer" containerID="d7301473b04e966f96dfb55d6903f269bd88d94bd3416a0a8aa313600a08b78c" Feb 27 17:07:03 crc kubenswrapper[4751]: E0227 17:07:03.807489 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7301473b04e966f96dfb55d6903f269bd88d94bd3416a0a8aa313600a08b78c\": container with ID starting with d7301473b04e966f96dfb55d6903f269bd88d94bd3416a0a8aa313600a08b78c not found: ID does not exist" containerID="d7301473b04e966f96dfb55d6903f269bd88d94bd3416a0a8aa313600a08b78c" Feb 27 17:07:03 crc kubenswrapper[4751]: I0227 17:07:03.807540 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7301473b04e966f96dfb55d6903f269bd88d94bd3416a0a8aa313600a08b78c"} err="failed to get container status \"d7301473b04e966f96dfb55d6903f269bd88d94bd3416a0a8aa313600a08b78c\": rpc error: code = NotFound desc = could not find container \"d7301473b04e966f96dfb55d6903f269bd88d94bd3416a0a8aa313600a08b78c\": container with ID starting with d7301473b04e966f96dfb55d6903f269bd88d94bd3416a0a8aa313600a08b78c not found: ID does not exist" Feb 27 17:07:03 crc kubenswrapper[4751]: I0227 17:07:03.807568 4751 scope.go:117] "RemoveContainer" containerID="531a0ab321d0763ffaeacb999f0e48ffb5ff59ff82a008ebcd18388a078ae5c5" Feb 27 17:07:03 crc kubenswrapper[4751]: E0227 17:07:03.808127 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"531a0ab321d0763ffaeacb999f0e48ffb5ff59ff82a008ebcd18388a078ae5c5\": container with ID starting with 531a0ab321d0763ffaeacb999f0e48ffb5ff59ff82a008ebcd18388a078ae5c5 not found: ID does not exist" containerID="531a0ab321d0763ffaeacb999f0e48ffb5ff59ff82a008ebcd18388a078ae5c5" Feb 27 17:07:03 crc kubenswrapper[4751]: I0227 17:07:03.808166 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"531a0ab321d0763ffaeacb999f0e48ffb5ff59ff82a008ebcd18388a078ae5c5"} err="failed to get container status \"531a0ab321d0763ffaeacb999f0e48ffb5ff59ff82a008ebcd18388a078ae5c5\": rpc error: code = NotFound desc = could not find container \"531a0ab321d0763ffaeacb999f0e48ffb5ff59ff82a008ebcd18388a078ae5c5\": container with ID starting with 531a0ab321d0763ffaeacb999f0e48ffb5ff59ff82a008ebcd18388a078ae5c5 not found: ID does not exist" Feb 27 17:07:03 crc kubenswrapper[4751]: I0227 17:07:03.814257 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0" (UID: "3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:07:03 crc kubenswrapper[4751]: I0227 17:07:03.879808 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 17:07:04 crc kubenswrapper[4751]: I0227 17:07:04.077093 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sstvr"] Feb 27 17:07:04 crc kubenswrapper[4751]: I0227 17:07:04.083184 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-sstvr"] Feb 27 17:07:04 crc kubenswrapper[4751]: I0227 17:07:04.532979 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0" path="/var/lib/kubelet/pods/3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0/volumes" Feb 27 17:07:10 crc kubenswrapper[4751]: I0227 17:07:10.520714 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:07:10 crc kubenswrapper[4751]: E0227 17:07:10.521462 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:07:25 crc kubenswrapper[4751]: I0227 17:07:25.521827 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:07:25 crc kubenswrapper[4751]: E0227 17:07:25.522830 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:07:40 crc kubenswrapper[4751]: I0227 17:07:40.521908 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:07:40 crc kubenswrapper[4751]: E0227 17:07:40.522861 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:07:53 crc kubenswrapper[4751]: I0227 17:07:53.521046 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:07:53 crc kubenswrapper[4751]: E0227 17:07:53.522251 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:08:00 crc kubenswrapper[4751]: I0227 17:08:00.161258 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536868-4s685"] Feb 27 17:08:00 crc kubenswrapper[4751]: E0227 17:08:00.162492 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0" containerName="registry-server" Feb 27 17:08:00 crc kubenswrapper[4751]: I0227 17:08:00.162516 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0" containerName="registry-server" Feb 27 17:08:00 crc kubenswrapper[4751]: E0227 17:08:00.162557 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0" containerName="extract-utilities" Feb 27 17:08:00 crc kubenswrapper[4751]: I0227 17:08:00.162570 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0" containerName="extract-utilities" Feb 27 17:08:00 crc kubenswrapper[4751]: E0227 17:08:00.162587 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0" containerName="extract-content" Feb 27 17:08:00 crc kubenswrapper[4751]: I0227 17:08:00.162600 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0" containerName="extract-content" Feb 27 17:08:00 crc kubenswrapper[4751]: I0227 17:08:00.162882 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d50cbdf-d9c4-4c41-8f20-aba7f5d3e5f0" containerName="registry-server" Feb 27 17:08:00 crc kubenswrapper[4751]: I0227 17:08:00.163652 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536868-4s685" Feb 27 17:08:00 crc kubenswrapper[4751]: I0227 17:08:00.166612 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:08:00 crc kubenswrapper[4751]: I0227 17:08:00.166996 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:08:00 crc kubenswrapper[4751]: I0227 17:08:00.167558 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:08:00 crc kubenswrapper[4751]: I0227 17:08:00.168762 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536868-4s685"] Feb 27 17:08:00 crc kubenswrapper[4751]: I0227 17:08:00.236907 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-psbvp\" (UniqueName: \"kubernetes.io/projected/282476e3-2137-4ef2-991e-a155758077b8-kube-api-access-psbvp\") pod \"auto-csr-approver-29536868-4s685\" (UID: \"282476e3-2137-4ef2-991e-a155758077b8\") " pod="openshift-infra/auto-csr-approver-29536868-4s685" Feb 27 17:08:00 crc kubenswrapper[4751]: I0227 17:08:00.337889 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-psbvp\" (UniqueName: \"kubernetes.io/projected/282476e3-2137-4ef2-991e-a155758077b8-kube-api-access-psbvp\") pod \"auto-csr-approver-29536868-4s685\" (UID: \"282476e3-2137-4ef2-991e-a155758077b8\") " pod="openshift-infra/auto-csr-approver-29536868-4s685" Feb 27 17:08:00 crc kubenswrapper[4751]: I0227 17:08:00.372338 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-psbvp\" (UniqueName: \"kubernetes.io/projected/282476e3-2137-4ef2-991e-a155758077b8-kube-api-access-psbvp\") pod \"auto-csr-approver-29536868-4s685\" (UID: \"282476e3-2137-4ef2-991e-a155758077b8\") " pod="openshift-infra/auto-csr-approver-29536868-4s685" Feb 27 17:08:00 crc kubenswrapper[4751]: I0227 17:08:00.485976 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536868-4s685" Feb 27 17:08:00 crc kubenswrapper[4751]: I0227 17:08:00.764951 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536868-4s685"] Feb 27 17:08:01 crc kubenswrapper[4751]: I0227 17:08:01.272694 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536868-4s685" event={"ID":"282476e3-2137-4ef2-991e-a155758077b8","Type":"ContainerStarted","Data":"41395593800ade7c12ae34b39828df15230903b6fa7b166c74bee92337486325"} Feb 27 17:08:02 crc kubenswrapper[4751]: I0227 17:08:02.284370 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536868-4s685" event={"ID":"282476e3-2137-4ef2-991e-a155758077b8","Type":"ContainerStarted","Data":"7dacc719a708791b905544cf062d6e36e74e884e55e9b0902e313c7c8e51d2ae"} Feb 27 17:08:02 crc kubenswrapper[4751]: I0227 17:08:02.308785 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-infra/auto-csr-approver-29536868-4s685" podStartSLOduration=1.400658724 podStartE2EDuration="2.308759148s" podCreationTimestamp="2026-02-27 17:08:00 +0000 UTC" firstStartedPulling="2026-02-27 17:08:00.803472739 +0000 UTC m=+2642.950487186" lastFinishedPulling="2026-02-27 17:08:01.711573133 +0000 UTC m=+2643.858587610" observedRunningTime="2026-02-27 17:08:02.300330178 +0000 UTC m=+2644.447344665" watchObservedRunningTime="2026-02-27 17:08:02.308759148 +0000 UTC m=+2644.455773635" Feb 27 17:08:03 crc kubenswrapper[4751]: I0227 17:08:03.297663 4751 generic.go:334] "Generic (PLEG): container finished" podID="282476e3-2137-4ef2-991e-a155758077b8" containerID="7dacc719a708791b905544cf062d6e36e74e884e55e9b0902e313c7c8e51d2ae" exitCode=0 Feb 27 17:08:03 crc kubenswrapper[4751]: I0227 17:08:03.297739 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536868-4s685" event={"ID":"282476e3-2137-4ef2-991e-a155758077b8","Type":"ContainerDied","Data":"7dacc719a708791b905544cf062d6e36e74e884e55e9b0902e313c7c8e51d2ae"} Feb 27 17:08:04 crc kubenswrapper[4751]: I0227 17:08:04.643579 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536868-4s685" Feb 27 17:08:04 crc kubenswrapper[4751]: I0227 17:08:04.817704 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-psbvp\" (UniqueName: \"kubernetes.io/projected/282476e3-2137-4ef2-991e-a155758077b8-kube-api-access-psbvp\") pod \"282476e3-2137-4ef2-991e-a155758077b8\" (UID: \"282476e3-2137-4ef2-991e-a155758077b8\") " Feb 27 17:08:04 crc kubenswrapper[4751]: I0227 17:08:04.825215 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/282476e3-2137-4ef2-991e-a155758077b8-kube-api-access-psbvp" (OuterVolumeSpecName: "kube-api-access-psbvp") pod "282476e3-2137-4ef2-991e-a155758077b8" (UID: "282476e3-2137-4ef2-991e-a155758077b8"). InnerVolumeSpecName "kube-api-access-psbvp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:08:04 crc kubenswrapper[4751]: I0227 17:08:04.919520 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-psbvp\" (UniqueName: \"kubernetes.io/projected/282476e3-2137-4ef2-991e-a155758077b8-kube-api-access-psbvp\") on node \"crc\" DevicePath \"\"" Feb 27 17:08:05 crc kubenswrapper[4751]: I0227 17:08:05.316089 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536868-4s685" event={"ID":"282476e3-2137-4ef2-991e-a155758077b8","Type":"ContainerDied","Data":"41395593800ade7c12ae34b39828df15230903b6fa7b166c74bee92337486325"} Feb 27 17:08:05 crc kubenswrapper[4751]: I0227 17:08:05.316148 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="41395593800ade7c12ae34b39828df15230903b6fa7b166c74bee92337486325" Feb 27 17:08:05 crc kubenswrapper[4751]: I0227 17:08:05.316185 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536868-4s685" Feb 27 17:08:05 crc kubenswrapper[4751]: I0227 17:08:05.398032 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536862-k66j6"] Feb 27 17:08:05 crc kubenswrapper[4751]: I0227 17:08:05.405266 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536862-k66j6"] Feb 27 17:08:06 crc kubenswrapper[4751]: I0227 17:08:06.539130 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87d84eef-d1b9-4496-9a93-758b8719da36" path="/var/lib/kubelet/pods/87d84eef-d1b9-4496-9a93-758b8719da36/volumes" Feb 27 17:08:07 crc kubenswrapper[4751]: I0227 17:08:07.011862 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vm2d5"] Feb 27 17:08:07 crc kubenswrapper[4751]: E0227 17:08:07.017606 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="282476e3-2137-4ef2-991e-a155758077b8" containerName="oc" Feb 27 17:08:07 crc kubenswrapper[4751]: I0227 17:08:07.017690 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="282476e3-2137-4ef2-991e-a155758077b8" containerName="oc" Feb 27 17:08:07 crc kubenswrapper[4751]: I0227 17:08:07.018160 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="282476e3-2137-4ef2-991e-a155758077b8" containerName="oc" Feb 27 17:08:07 crc kubenswrapper[4751]: I0227 17:08:07.020159 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vm2d5" Feb 27 17:08:07 crc kubenswrapper[4751]: I0227 17:08:07.034009 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vm2d5"] Feb 27 17:08:07 crc kubenswrapper[4751]: I0227 17:08:07.158089 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/641252ce-91b8-40bf-a756-23643586710c-catalog-content\") pod \"redhat-operators-vm2d5\" (UID: \"641252ce-91b8-40bf-a756-23643586710c\") " pod="openshift-marketplace/redhat-operators-vm2d5" Feb 27 17:08:07 crc kubenswrapper[4751]: I0227 17:08:07.158773 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgsqw\" (UniqueName: \"kubernetes.io/projected/641252ce-91b8-40bf-a756-23643586710c-kube-api-access-xgsqw\") pod \"redhat-operators-vm2d5\" (UID: \"641252ce-91b8-40bf-a756-23643586710c\") " pod="openshift-marketplace/redhat-operators-vm2d5" Feb 27 17:08:07 crc kubenswrapper[4751]: I0227 17:08:07.158838 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/641252ce-91b8-40bf-a756-23643586710c-utilities\") pod \"redhat-operators-vm2d5\" (UID: \"641252ce-91b8-40bf-a756-23643586710c\") " pod="openshift-marketplace/redhat-operators-vm2d5" Feb 27 17:08:07 crc kubenswrapper[4751]: I0227 17:08:07.261057 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgsqw\" (UniqueName: \"kubernetes.io/projected/641252ce-91b8-40bf-a756-23643586710c-kube-api-access-xgsqw\") pod \"redhat-operators-vm2d5\" (UID: \"641252ce-91b8-40bf-a756-23643586710c\") " pod="openshift-marketplace/redhat-operators-vm2d5" Feb 27 17:08:07 crc kubenswrapper[4751]: I0227 17:08:07.261104 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/641252ce-91b8-40bf-a756-23643586710c-utilities\") pod \"redhat-operators-vm2d5\" (UID: \"641252ce-91b8-40bf-a756-23643586710c\") " pod="openshift-marketplace/redhat-operators-vm2d5" Feb 27 17:08:07 crc kubenswrapper[4751]: I0227 17:08:07.261192 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/641252ce-91b8-40bf-a756-23643586710c-catalog-content\") pod \"redhat-operators-vm2d5\" (UID: \"641252ce-91b8-40bf-a756-23643586710c\") " pod="openshift-marketplace/redhat-operators-vm2d5" Feb 27 17:08:07 crc kubenswrapper[4751]: I0227 17:08:07.261757 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/641252ce-91b8-40bf-a756-23643586710c-catalog-content\") pod \"redhat-operators-vm2d5\" (UID: \"641252ce-91b8-40bf-a756-23643586710c\") " pod="openshift-marketplace/redhat-operators-vm2d5" Feb 27 17:08:07 crc kubenswrapper[4751]: I0227 17:08:07.262228 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/641252ce-91b8-40bf-a756-23643586710c-utilities\") pod \"redhat-operators-vm2d5\" (UID: \"641252ce-91b8-40bf-a756-23643586710c\") " pod="openshift-marketplace/redhat-operators-vm2d5" Feb 27 17:08:07 crc kubenswrapper[4751]: I0227 17:08:07.287387 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-xgsqw\" (UniqueName: \"kubernetes.io/projected/641252ce-91b8-40bf-a756-23643586710c-kube-api-access-xgsqw\") pod \"redhat-operators-vm2d5\" (UID: \"641252ce-91b8-40bf-a756-23643586710c\") " pod="openshift-marketplace/redhat-operators-vm2d5" Feb 27 17:08:07 crc kubenswrapper[4751]: I0227 17:08:07.362705 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vm2d5" Feb 27 17:08:07 crc kubenswrapper[4751]: I0227 17:08:07.521262 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:08:07 crc kubenswrapper[4751]: E0227 17:08:07.521839 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:08:07 crc kubenswrapper[4751]: I0227 17:08:07.669874 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vm2d5"] Feb 27 17:08:08 crc kubenswrapper[4751]: I0227 17:08:08.342055 4751 generic.go:334] "Generic (PLEG): container finished" podID="641252ce-91b8-40bf-a756-23643586710c" containerID="2e085e543d98f0407298ca1d5b9eee159a3a06a037962c8225198af1501b27bd" exitCode=0 Feb 27 17:08:08 crc kubenswrapper[4751]: I0227 17:08:08.342159 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vm2d5" event={"ID":"641252ce-91b8-40bf-a756-23643586710c","Type":"ContainerDied","Data":"2e085e543d98f0407298ca1d5b9eee159a3a06a037962c8225198af1501b27bd"} Feb 27 17:08:08 crc kubenswrapper[4751]: I0227 17:08:08.342379 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vm2d5" event={"ID":"641252ce-91b8-40bf-a756-23643586710c","Type":"ContainerStarted","Data":"3398196ac8b82c2ae659ab85d90a943a317245d30249f929d3f7e8ea317a4f6a"} Feb 27 17:08:09 crc kubenswrapper[4751]: I0227 17:08:09.352913 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vm2d5" event={"ID":"641252ce-91b8-40bf-a756-23643586710c","Type":"ContainerStarted","Data":"82ac73c6c555d079cb4102d5877c29ef86d1d48ac750ddf05da1b1bf4ab49fa9"} Feb 27 17:08:10 crc kubenswrapper[4751]: I0227 17:08:10.363948 4751 generic.go:334] "Generic (PLEG): container finished" podID="641252ce-91b8-40bf-a756-23643586710c" containerID="82ac73c6c555d079cb4102d5877c29ef86d1d48ac750ddf05da1b1bf4ab49fa9" exitCode=0 Feb 27 17:08:10 crc kubenswrapper[4751]: I0227 17:08:10.364085 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vm2d5" event={"ID":"641252ce-91b8-40bf-a756-23643586710c","Type":"ContainerDied","Data":"82ac73c6c555d079cb4102d5877c29ef86d1d48ac750ddf05da1b1bf4ab49fa9"} Feb 27 17:08:11 crc kubenswrapper[4751]: I0227 17:08:11.377722 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vm2d5" event={"ID":"641252ce-91b8-40bf-a756-23643586710c","Type":"ContainerStarted","Data":"2b781c801e441e99f439af343d3f608c4f0f1829850d49c0447f58c9148ff71d"} Feb 27 17:08:11 crc kubenswrapper[4751]: I0227 17:08:11.403121 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-operators-vm2d5" podStartSLOduration=2.961365237 podStartE2EDuration="5.403100789s" podCreationTimestamp="2026-02-27 17:08:06 +0000 UTC" firstStartedPulling="2026-02-27 17:08:08.343598395 +0000 UTC m=+2650.490612842" lastFinishedPulling="2026-02-27 17:08:10.785333907 +0000 UTC m=+2652.932348394" observedRunningTime="2026-02-27 17:08:11.401832176 +0000 UTC m=+2653.548846673" watchObservedRunningTime="2026-02-27 17:08:11.403100789 +0000 UTC m=+2653.550115246" Feb 27 17:08:17 crc kubenswrapper[4751]: I0227 17:08:17.362908 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vm2d5" Feb 27 17:08:17 crc kubenswrapper[4751]: I0227 17:08:17.364096 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vm2d5" Feb 27 17:08:18 crc kubenswrapper[4751]: I0227 17:08:18.434538 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vm2d5" podUID="641252ce-91b8-40bf-a756-23643586710c" containerName="registry-server" probeResult="failure" output=< Feb 27 17:08:18 crc kubenswrapper[4751]: timeout: failed to connect service ":50051" within 1s Feb 27 17:08:18 crc kubenswrapper[4751]: > Feb 27 17:08:19 crc kubenswrapper[4751]: I0227 17:08:19.520506 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:08:19 crc kubenswrapper[4751]: E0227 17:08:19.521886 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:08:27 crc kubenswrapper[4751]: I0227 17:08:27.418873 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vm2d5" Feb 27 17:08:27 crc kubenswrapper[4751]: I0227 17:08:27.481068 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vm2d5" Feb 27 17:08:27 crc kubenswrapper[4751]: I0227 17:08:27.668904 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vm2d5"] Feb 27 17:08:28 crc kubenswrapper[4751]: I0227 17:08:28.536222 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vm2d5" podUID="641252ce-91b8-40bf-a756-23643586710c" containerName="registry-server" containerID="cri-o://2b781c801e441e99f439af343d3f608c4f0f1829850d49c0447f58c9148ff71d" gracePeriod=2 Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.030069 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vm2d5" Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.221245 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/641252ce-91b8-40bf-a756-23643586710c-catalog-content\") pod \"641252ce-91b8-40bf-a756-23643586710c\" (UID: \"641252ce-91b8-40bf-a756-23643586710c\") " Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.221485 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/641252ce-91b8-40bf-a756-23643586710c-utilities\") pod \"641252ce-91b8-40bf-a756-23643586710c\" (UID: \"641252ce-91b8-40bf-a756-23643586710c\") " Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.222782 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/641252ce-91b8-40bf-a756-23643586710c-utilities" (OuterVolumeSpecName: "utilities") pod "641252ce-91b8-40bf-a756-23643586710c" (UID: "641252ce-91b8-40bf-a756-23643586710c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.222885 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xgsqw\" (UniqueName: \"kubernetes.io/projected/641252ce-91b8-40bf-a756-23643586710c-kube-api-access-xgsqw\") pod \"641252ce-91b8-40bf-a756-23643586710c\" (UID: \"641252ce-91b8-40bf-a756-23643586710c\") " Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.223927 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/641252ce-91b8-40bf-a756-23643586710c-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.227753 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/641252ce-91b8-40bf-a756-23643586710c-kube-api-access-xgsqw" (OuterVolumeSpecName: "kube-api-access-xgsqw") pod "641252ce-91b8-40bf-a756-23643586710c" (UID: "641252ce-91b8-40bf-a756-23643586710c"). InnerVolumeSpecName "kube-api-access-xgsqw". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.326328 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xgsqw\" (UniqueName: \"kubernetes.io/projected/641252ce-91b8-40bf-a756-23643586710c-kube-api-access-xgsqw\") on node \"crc\" DevicePath \"\"" Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.409897 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/641252ce-91b8-40bf-a756-23643586710c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "641252ce-91b8-40bf-a756-23643586710c" (UID: "641252ce-91b8-40bf-a756-23643586710c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.427867 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/641252ce-91b8-40bf-a756-23643586710c-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.551756 4751 generic.go:334] "Generic (PLEG): container finished" podID="641252ce-91b8-40bf-a756-23643586710c" containerID="2b781c801e441e99f439af343d3f608c4f0f1829850d49c0447f58c9148ff71d" exitCode=0 Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.551822 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vm2d5" event={"ID":"641252ce-91b8-40bf-a756-23643586710c","Type":"ContainerDied","Data":"2b781c801e441e99f439af343d3f608c4f0f1829850d49c0447f58c9148ff71d"} Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.551866 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vm2d5" event={"ID":"641252ce-91b8-40bf-a756-23643586710c","Type":"ContainerDied","Data":"3398196ac8b82c2ae659ab85d90a943a317245d30249f929d3f7e8ea317a4f6a"} Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.551893 4751 scope.go:117] "RemoveContainer" containerID="2b781c801e441e99f439af343d3f608c4f0f1829850d49c0447f58c9148ff71d" Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.551827 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vm2d5" Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.582711 4751 scope.go:117] "RemoveContainer" containerID="82ac73c6c555d079cb4102d5877c29ef86d1d48ac750ddf05da1b1bf4ab49fa9" Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.613648 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vm2d5"] Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.619833 4751 scope.go:117] "RemoveContainer" containerID="2e085e543d98f0407298ca1d5b9eee159a3a06a037962c8225198af1501b27bd" Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.628466 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-vm2d5"] Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.664505 4751 scope.go:117] "RemoveContainer" containerID="2b781c801e441e99f439af343d3f608c4f0f1829850d49c0447f58c9148ff71d" Feb 27 17:08:29 crc kubenswrapper[4751]: E0227 17:08:29.665008 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2b781c801e441e99f439af343d3f608c4f0f1829850d49c0447f58c9148ff71d\": container with ID starting with 2b781c801e441e99f439af343d3f608c4f0f1829850d49c0447f58c9148ff71d not found: ID does not exist" containerID="2b781c801e441e99f439af343d3f608c4f0f1829850d49c0447f58c9148ff71d" Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.665085 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b781c801e441e99f439af343d3f608c4f0f1829850d49c0447f58c9148ff71d"} err="failed to get container status \"2b781c801e441e99f439af343d3f608c4f0f1829850d49c0447f58c9148ff71d\": rpc error: code = NotFound desc = could not find container \"2b781c801e441e99f439af343d3f608c4f0f1829850d49c0447f58c9148ff71d\": container with ID starting with 2b781c801e441e99f439af343d3f608c4f0f1829850d49c0447f58c9148ff71d not found: ID does not exist" Feb 27 17:08:29 crc 
kubenswrapper[4751]: I0227 17:08:29.665133 4751 scope.go:117] "RemoveContainer" containerID="82ac73c6c555d079cb4102d5877c29ef86d1d48ac750ddf05da1b1bf4ab49fa9" Feb 27 17:08:29 crc kubenswrapper[4751]: E0227 17:08:29.665888 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82ac73c6c555d079cb4102d5877c29ef86d1d48ac750ddf05da1b1bf4ab49fa9\": container with ID starting with 82ac73c6c555d079cb4102d5877c29ef86d1d48ac750ddf05da1b1bf4ab49fa9 not found: ID does not exist" containerID="82ac73c6c555d079cb4102d5877c29ef86d1d48ac750ddf05da1b1bf4ab49fa9" Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.665928 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82ac73c6c555d079cb4102d5877c29ef86d1d48ac750ddf05da1b1bf4ab49fa9"} err="failed to get container status \"82ac73c6c555d079cb4102d5877c29ef86d1d48ac750ddf05da1b1bf4ab49fa9\": rpc error: code = NotFound desc = could not find container \"82ac73c6c555d079cb4102d5877c29ef86d1d48ac750ddf05da1b1bf4ab49fa9\": container with ID starting with 82ac73c6c555d079cb4102d5877c29ef86d1d48ac750ddf05da1b1bf4ab49fa9 not found: ID does not exist" Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.665955 4751 scope.go:117] "RemoveContainer" containerID="2e085e543d98f0407298ca1d5b9eee159a3a06a037962c8225198af1501b27bd" Feb 27 17:08:29 crc kubenswrapper[4751]: E0227 17:08:29.666550 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e085e543d98f0407298ca1d5b9eee159a3a06a037962c8225198af1501b27bd\": container with ID starting with 2e085e543d98f0407298ca1d5b9eee159a3a06a037962c8225198af1501b27bd not found: ID does not exist" containerID="2e085e543d98f0407298ca1d5b9eee159a3a06a037962c8225198af1501b27bd" Feb 27 17:08:29 crc kubenswrapper[4751]: I0227 17:08:29.666611 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e085e543d98f0407298ca1d5b9eee159a3a06a037962c8225198af1501b27bd"} err="failed to get container status \"2e085e543d98f0407298ca1d5b9eee159a3a06a037962c8225198af1501b27bd\": rpc error: code = NotFound desc = could not find container \"2e085e543d98f0407298ca1d5b9eee159a3a06a037962c8225198af1501b27bd\": container with ID starting with 2e085e543d98f0407298ca1d5b9eee159a3a06a037962c8225198af1501b27bd not found: ID does not exist" Feb 27 17:08:30 crc kubenswrapper[4751]: I0227 17:08:30.538814 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="641252ce-91b8-40bf-a756-23643586710c" path="/var/lib/kubelet/pods/641252ce-91b8-40bf-a756-23643586710c/volumes" Feb 27 17:08:32 crc kubenswrapper[4751]: I0227 17:08:32.521332 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:08:32 crc kubenswrapper[4751]: E0227 17:08:32.522053 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:08:44 crc kubenswrapper[4751]: I0227 17:08:44.521033 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" 
Feb 27 17:08:44 crc kubenswrapper[4751]: E0227 17:08:44.522343 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:08:49 crc kubenswrapper[4751]: I0227 17:08:49.297057 4751 scope.go:117] "RemoveContainer" containerID="3ef0dda66111a89aa652b91ad74327fc1f11986918cfca5b2d065df55f71de29" Feb 27 17:08:58 crc kubenswrapper[4751]: I0227 17:08:58.523860 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:08:58 crc kubenswrapper[4751]: E0227 17:08:58.525330 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:09:10 crc kubenswrapper[4751]: I0227 17:09:10.520366 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:09:10 crc kubenswrapper[4751]: E0227 17:09:10.521325 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:09:22 crc kubenswrapper[4751]: I0227 17:09:22.521324 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:09:22 crc kubenswrapper[4751]: E0227 17:09:22.522731 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:09:33 crc kubenswrapper[4751]: I0227 17:09:33.520971 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:09:33 crc kubenswrapper[4751]: E0227 17:09:33.522089 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:09:45 crc kubenswrapper[4751]: I0227 17:09:45.521105 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:09:45 
crc kubenswrapper[4751]: E0227 17:09:45.521806 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:09:56 crc kubenswrapper[4751]: I0227 17:09:56.520557 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:09:56 crc kubenswrapper[4751]: E0227 17:09:56.521082 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:10:00 crc kubenswrapper[4751]: I0227 17:10:00.159231 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536870-bxl78"] Feb 27 17:10:00 crc kubenswrapper[4751]: E0227 17:10:00.160333 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="641252ce-91b8-40bf-a756-23643586710c" containerName="extract-utilities" Feb 27 17:10:00 crc kubenswrapper[4751]: I0227 17:10:00.160352 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="641252ce-91b8-40bf-a756-23643586710c" containerName="extract-utilities" Feb 27 17:10:00 crc kubenswrapper[4751]: E0227 17:10:00.160386 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="641252ce-91b8-40bf-a756-23643586710c" containerName="extract-content" Feb 27 17:10:00 crc kubenswrapper[4751]: I0227 17:10:00.160543 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="641252ce-91b8-40bf-a756-23643586710c" containerName="extract-content" Feb 27 17:10:00 crc kubenswrapper[4751]: E0227 17:10:00.160564 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="641252ce-91b8-40bf-a756-23643586710c" containerName="registry-server" Feb 27 17:10:00 crc kubenswrapper[4751]: I0227 17:10:00.160575 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="641252ce-91b8-40bf-a756-23643586710c" containerName="registry-server" Feb 27 17:10:00 crc kubenswrapper[4751]: I0227 17:10:00.160972 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="641252ce-91b8-40bf-a756-23643586710c" containerName="registry-server" Feb 27 17:10:00 crc kubenswrapper[4751]: I0227 17:10:00.161522 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536870-bxl78" Feb 27 17:10:00 crc kubenswrapper[4751]: I0227 17:10:00.169111 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536870-bxl78"] Feb 27 17:10:00 crc kubenswrapper[4751]: I0227 17:10:00.170167 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:10:00 crc kubenswrapper[4751]: I0227 17:10:00.170237 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:10:00 crc kubenswrapper[4751]: I0227 17:10:00.170584 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:10:00 crc kubenswrapper[4751]: I0227 17:10:00.294649 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vsmqr\" (UniqueName: \"kubernetes.io/projected/cc8bb46d-9242-4d86-9e94-0bc27aa48ed9-kube-api-access-vsmqr\") pod \"auto-csr-approver-29536870-bxl78\" (UID: \"cc8bb46d-9242-4d86-9e94-0bc27aa48ed9\") " pod="openshift-infra/auto-csr-approver-29536870-bxl78" Feb 27 17:10:00 crc kubenswrapper[4751]: I0227 17:10:00.396047 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vsmqr\" (UniqueName: \"kubernetes.io/projected/cc8bb46d-9242-4d86-9e94-0bc27aa48ed9-kube-api-access-vsmqr\") pod \"auto-csr-approver-29536870-bxl78\" (UID: \"cc8bb46d-9242-4d86-9e94-0bc27aa48ed9\") " pod="openshift-infra/auto-csr-approver-29536870-bxl78" Feb 27 17:10:00 crc kubenswrapper[4751]: I0227 17:10:00.424637 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vsmqr\" (UniqueName: \"kubernetes.io/projected/cc8bb46d-9242-4d86-9e94-0bc27aa48ed9-kube-api-access-vsmqr\") pod \"auto-csr-approver-29536870-bxl78\" (UID: \"cc8bb46d-9242-4d86-9e94-0bc27aa48ed9\") " pod="openshift-infra/auto-csr-approver-29536870-bxl78" Feb 27 17:10:00 crc kubenswrapper[4751]: I0227 17:10:00.495671 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536870-bxl78" Feb 27 17:10:00 crc kubenswrapper[4751]: I0227 17:10:00.970925 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536870-bxl78"] Feb 27 17:10:01 crc kubenswrapper[4751]: I0227 17:10:01.461455 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536870-bxl78" event={"ID":"cc8bb46d-9242-4d86-9e94-0bc27aa48ed9","Type":"ContainerStarted","Data":"310e3fb57c9f4884bb49f271acc9ecd52ebb5230a43e37c3ab13960e8d4da3c9"} Feb 27 17:10:02 crc kubenswrapper[4751]: I0227 17:10:02.471752 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536870-bxl78" event={"ID":"cc8bb46d-9242-4d86-9e94-0bc27aa48ed9","Type":"ContainerStarted","Data":"487721ea50edd67a10b916c78b307bef7669fc1bca2ead0bc111cbd6e01d2ba4"} Feb 27 17:10:02 crc kubenswrapper[4751]: I0227 17:10:02.496724 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-infra/auto-csr-approver-29536870-bxl78" podStartSLOduration=1.473778273 podStartE2EDuration="2.496697055s" podCreationTimestamp="2026-02-27 17:10:00 +0000 UTC" firstStartedPulling="2026-02-27 17:10:00.991972369 +0000 UTC m=+2763.138986816" lastFinishedPulling="2026-02-27 17:10:02.014891121 +0000 UTC m=+2764.161905598" observedRunningTime="2026-02-27 17:10:02.484607461 +0000 UTC m=+2764.631621948" watchObservedRunningTime="2026-02-27 17:10:02.496697055 +0000 UTC m=+2764.643711532" Feb 27 17:10:03 crc kubenswrapper[4751]: I0227 17:10:03.480949 4751 generic.go:334] "Generic (PLEG): container finished" podID="cc8bb46d-9242-4d86-9e94-0bc27aa48ed9" containerID="487721ea50edd67a10b916c78b307bef7669fc1bca2ead0bc111cbd6e01d2ba4" exitCode=0 Feb 27 17:10:03 crc kubenswrapper[4751]: I0227 17:10:03.481007 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536870-bxl78" event={"ID":"cc8bb46d-9242-4d86-9e94-0bc27aa48ed9","Type":"ContainerDied","Data":"487721ea50edd67a10b916c78b307bef7669fc1bca2ead0bc111cbd6e01d2ba4"} Feb 27 17:10:04 crc kubenswrapper[4751]: I0227 17:10:04.829950 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536870-bxl78" Feb 27 17:10:04 crc kubenswrapper[4751]: I0227 17:10:04.978296 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vsmqr\" (UniqueName: \"kubernetes.io/projected/cc8bb46d-9242-4d86-9e94-0bc27aa48ed9-kube-api-access-vsmqr\") pod \"cc8bb46d-9242-4d86-9e94-0bc27aa48ed9\" (UID: \"cc8bb46d-9242-4d86-9e94-0bc27aa48ed9\") " Feb 27 17:10:04 crc kubenswrapper[4751]: I0227 17:10:04.985924 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc8bb46d-9242-4d86-9e94-0bc27aa48ed9-kube-api-access-vsmqr" (OuterVolumeSpecName: "kube-api-access-vsmqr") pod "cc8bb46d-9242-4d86-9e94-0bc27aa48ed9" (UID: "cc8bb46d-9242-4d86-9e94-0bc27aa48ed9"). InnerVolumeSpecName "kube-api-access-vsmqr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:10:05 crc kubenswrapper[4751]: I0227 17:10:05.083374 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vsmqr\" (UniqueName: \"kubernetes.io/projected/cc8bb46d-9242-4d86-9e94-0bc27aa48ed9-kube-api-access-vsmqr\") on node \"crc\" DevicePath \"\"" Feb 27 17:10:05 crc kubenswrapper[4751]: I0227 17:10:05.499565 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536870-bxl78" event={"ID":"cc8bb46d-9242-4d86-9e94-0bc27aa48ed9","Type":"ContainerDied","Data":"310e3fb57c9f4884bb49f271acc9ecd52ebb5230a43e37c3ab13960e8d4da3c9"} Feb 27 17:10:05 crc kubenswrapper[4751]: I0227 17:10:05.499623 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="310e3fb57c9f4884bb49f271acc9ecd52ebb5230a43e37c3ab13960e8d4da3c9" Feb 27 17:10:05 crc kubenswrapper[4751]: I0227 17:10:05.499637 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536870-bxl78" Feb 27 17:10:05 crc kubenswrapper[4751]: I0227 17:10:05.573765 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536864-rwwt8"] Feb 27 17:10:05 crc kubenswrapper[4751]: I0227 17:10:05.580116 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536864-rwwt8"] Feb 27 17:10:06 crc kubenswrapper[4751]: I0227 17:10:06.543042 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4dc0281f-c3f5-4047-a1fd-a305228ed8a5" path="/var/lib/kubelet/pods/4dc0281f-c3f5-4047-a1fd-a305228ed8a5/volumes" Feb 27 17:10:10 crc kubenswrapper[4751]: I0227 17:10:10.521071 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:10:10 crc kubenswrapper[4751]: E0227 17:10:10.521758 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:10:23 crc kubenswrapper[4751]: I0227 17:10:23.521357 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:10:23 crc kubenswrapper[4751]: E0227 17:10:23.522353 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:10:35 crc kubenswrapper[4751]: I0227 17:10:35.521686 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:10:35 crc kubenswrapper[4751]: I0227 17:10:35.769773 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerStarted","Data":"bd8999e1ab4da3e8cda07e501f94f9a857c58ee7c6ff481b7560440bab9e40c0"} Feb 27 17:10:49 
crc kubenswrapper[4751]: I0227 17:10:49.409306 4751 scope.go:117] "RemoveContainer" containerID="67d9136de76a65c594cc776684f377177c18c9bd02390971709d96245b681b23" Feb 27 17:12:00 crc kubenswrapper[4751]: I0227 17:12:00.161585 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536872-5dslp"] Feb 27 17:12:00 crc kubenswrapper[4751]: E0227 17:12:00.163279 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc8bb46d-9242-4d86-9e94-0bc27aa48ed9" containerName="oc" Feb 27 17:12:00 crc kubenswrapper[4751]: I0227 17:12:00.163306 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc8bb46d-9242-4d86-9e94-0bc27aa48ed9" containerName="oc" Feb 27 17:12:00 crc kubenswrapper[4751]: I0227 17:12:00.163646 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc8bb46d-9242-4d86-9e94-0bc27aa48ed9" containerName="oc" Feb 27 17:12:00 crc kubenswrapper[4751]: I0227 17:12:00.164626 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536872-5dslp" Feb 27 17:12:00 crc kubenswrapper[4751]: I0227 17:12:00.167509 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:12:00 crc kubenswrapper[4751]: I0227 17:12:00.168002 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:12:00 crc kubenswrapper[4751]: I0227 17:12:00.168216 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:12:00 crc kubenswrapper[4751]: I0227 17:12:00.175292 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536872-5dslp"] Feb 27 17:12:00 crc kubenswrapper[4751]: I0227 17:12:00.315950 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kscdv\" (UniqueName: \"kubernetes.io/projected/bbd1eb54-185b-4132-b7ea-f6ce2688c71c-kube-api-access-kscdv\") pod \"auto-csr-approver-29536872-5dslp\" (UID: \"bbd1eb54-185b-4132-b7ea-f6ce2688c71c\") " pod="openshift-infra/auto-csr-approver-29536872-5dslp" Feb 27 17:12:00 crc kubenswrapper[4751]: I0227 17:12:00.417938 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kscdv\" (UniqueName: \"kubernetes.io/projected/bbd1eb54-185b-4132-b7ea-f6ce2688c71c-kube-api-access-kscdv\") pod \"auto-csr-approver-29536872-5dslp\" (UID: \"bbd1eb54-185b-4132-b7ea-f6ce2688c71c\") " pod="openshift-infra/auto-csr-approver-29536872-5dslp" Feb 27 17:12:00 crc kubenswrapper[4751]: I0227 17:12:00.447672 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kscdv\" (UniqueName: \"kubernetes.io/projected/bbd1eb54-185b-4132-b7ea-f6ce2688c71c-kube-api-access-kscdv\") pod \"auto-csr-approver-29536872-5dslp\" (UID: \"bbd1eb54-185b-4132-b7ea-f6ce2688c71c\") " pod="openshift-infra/auto-csr-approver-29536872-5dslp" Feb 27 17:12:00 crc kubenswrapper[4751]: I0227 17:12:00.493229 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536872-5dslp" Feb 27 17:12:00 crc kubenswrapper[4751]: I0227 17:12:00.994633 4751 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 27 17:12:01 crc kubenswrapper[4751]: I0227 17:12:01.000077 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536872-5dslp"] Feb 27 17:12:01 crc kubenswrapper[4751]: I0227 17:12:01.632297 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536872-5dslp" event={"ID":"bbd1eb54-185b-4132-b7ea-f6ce2688c71c","Type":"ContainerStarted","Data":"20b25566ca56068ab4775760d2cbc26ab9f1add7e6b088e86d72308450d22f64"} Feb 27 17:12:02 crc kubenswrapper[4751]: I0227 17:12:02.647733 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536872-5dslp" event={"ID":"bbd1eb54-185b-4132-b7ea-f6ce2688c71c","Type":"ContainerStarted","Data":"7d910b7111ec218ef847e3fdc11cd9f1242b1257246181d340d7bc37558f1a34"} Feb 27 17:12:02 crc kubenswrapper[4751]: I0227 17:12:02.670358 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-infra/auto-csr-approver-29536872-5dslp" podStartSLOduration=1.509573614 podStartE2EDuration="2.670329734s" podCreationTimestamp="2026-02-27 17:12:00 +0000 UTC" firstStartedPulling="2026-02-27 17:12:00.994301779 +0000 UTC m=+2883.141316226" lastFinishedPulling="2026-02-27 17:12:02.155057869 +0000 UTC m=+2884.302072346" observedRunningTime="2026-02-27 17:12:02.662794827 +0000 UTC m=+2884.809809284" watchObservedRunningTime="2026-02-27 17:12:02.670329734 +0000 UTC m=+2884.817344431" Feb 27 17:12:02 crc kubenswrapper[4751]: E0227 17:12:02.794968 4751 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbbd1eb54_185b_4132_b7ea_f6ce2688c71c.slice/crio-7d910b7111ec218ef847e3fdc11cd9f1242b1257246181d340d7bc37558f1a34.scope\": RecentStats: unable to find data in memory cache]" Feb 27 17:12:03 crc kubenswrapper[4751]: I0227 17:12:03.665010 4751 generic.go:334] "Generic (PLEG): container finished" podID="bbd1eb54-185b-4132-b7ea-f6ce2688c71c" containerID="7d910b7111ec218ef847e3fdc11cd9f1242b1257246181d340d7bc37558f1a34" exitCode=0 Feb 27 17:12:03 crc kubenswrapper[4751]: I0227 17:12:03.665163 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536872-5dslp" event={"ID":"bbd1eb54-185b-4132-b7ea-f6ce2688c71c","Type":"ContainerDied","Data":"7d910b7111ec218ef847e3fdc11cd9f1242b1257246181d340d7bc37558f1a34"} Feb 27 17:12:05 crc kubenswrapper[4751]: I0227 17:12:05.096473 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536872-5dslp" Feb 27 17:12:05 crc kubenswrapper[4751]: I0227 17:12:05.216210 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kscdv\" (UniqueName: \"kubernetes.io/projected/bbd1eb54-185b-4132-b7ea-f6ce2688c71c-kube-api-access-kscdv\") pod \"bbd1eb54-185b-4132-b7ea-f6ce2688c71c\" (UID: \"bbd1eb54-185b-4132-b7ea-f6ce2688c71c\") " Feb 27 17:12:05 crc kubenswrapper[4751]: I0227 17:12:05.222648 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bbd1eb54-185b-4132-b7ea-f6ce2688c71c-kube-api-access-kscdv" (OuterVolumeSpecName: "kube-api-access-kscdv") pod "bbd1eb54-185b-4132-b7ea-f6ce2688c71c" (UID: "bbd1eb54-185b-4132-b7ea-f6ce2688c71c"). InnerVolumeSpecName "kube-api-access-kscdv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:12:05 crc kubenswrapper[4751]: I0227 17:12:05.318544 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kscdv\" (UniqueName: \"kubernetes.io/projected/bbd1eb54-185b-4132-b7ea-f6ce2688c71c-kube-api-access-kscdv\") on node \"crc\" DevicePath \"\"" Feb 27 17:12:05 crc kubenswrapper[4751]: I0227 17:12:05.697457 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536872-5dslp" event={"ID":"bbd1eb54-185b-4132-b7ea-f6ce2688c71c","Type":"ContainerDied","Data":"20b25566ca56068ab4775760d2cbc26ab9f1add7e6b088e86d72308450d22f64"} Feb 27 17:12:05 crc kubenswrapper[4751]: I0227 17:12:05.698200 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="20b25566ca56068ab4775760d2cbc26ab9f1add7e6b088e86d72308450d22f64" Feb 27 17:12:05 crc kubenswrapper[4751]: I0227 17:12:05.698487 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536872-5dslp" Feb 27 17:12:05 crc kubenswrapper[4751]: I0227 17:12:05.751312 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536866-7nrxf"] Feb 27 17:12:05 crc kubenswrapper[4751]: I0227 17:12:05.762346 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536866-7nrxf"] Feb 27 17:12:06 crc kubenswrapper[4751]: I0227 17:12:06.537498 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00cd8d48-bc37-44e1-9bfa-ea811897a782" path="/var/lib/kubelet/pods/00cd8d48-bc37-44e1-9bfa-ea811897a782/volumes" Feb 27 17:12:49 crc kubenswrapper[4751]: I0227 17:12:49.500995 4751 scope.go:117] "RemoveContainer" containerID="670822a51a004e03cfcde757360418fbe21c14652f762f1f891585ce42ae1a42" Feb 27 17:12:58 crc kubenswrapper[4751]: I0227 17:12:58.918560 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:12:58 crc kubenswrapper[4751]: I0227 17:12:58.919470 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:13:28 crc kubenswrapper[4751]: I0227 17:13:28.919151 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:13:28 crc kubenswrapper[4751]: I0227 17:13:28.919939 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:13:58 crc kubenswrapper[4751]: I0227 17:13:58.918132 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:13:58 crc kubenswrapper[4751]: I0227 17:13:58.918660 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:13:58 crc kubenswrapper[4751]: I0227 17:13:58.918712 4751 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 17:13:58 crc kubenswrapper[4751]: I0227 17:13:58.919324 4751 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"bd8999e1ab4da3e8cda07e501f94f9a857c58ee7c6ff481b7560440bab9e40c0"} pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 27 17:13:58 crc kubenswrapper[4751]: I0227 17:13:58.919384 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" containerID="cri-o://bd8999e1ab4da3e8cda07e501f94f9a857c58ee7c6ff481b7560440bab9e40c0" gracePeriod=600 Feb 27 17:13:59 crc kubenswrapper[4751]: I0227 17:13:59.804472 4751 generic.go:334] "Generic (PLEG): container finished" podID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerID="bd8999e1ab4da3e8cda07e501f94f9a857c58ee7c6ff481b7560440bab9e40c0" exitCode=0 Feb 27 17:13:59 crc kubenswrapper[4751]: I0227 17:13:59.804525 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerDied","Data":"bd8999e1ab4da3e8cda07e501f94f9a857c58ee7c6ff481b7560440bab9e40c0"} Feb 27 17:13:59 crc kubenswrapper[4751]: I0227 17:13:59.804866 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerStarted","Data":"24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49"} Feb 27 17:13:59 crc kubenswrapper[4751]: I0227 17:13:59.804897 4751 scope.go:117] "RemoveContainer" containerID="3fdf48f179be90dc229abdf61cfbf688daa1613b01128f9ee6dfdcc52c1a6407" Feb 27 17:14:00 crc kubenswrapper[4751]: I0227 17:14:00.151147 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536874-w4nmg"] Feb 27 17:14:00 crc kubenswrapper[4751]: E0227 17:14:00.151876 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbd1eb54-185b-4132-b7ea-f6ce2688c71c" containerName="oc" Feb 27 17:14:00 crc kubenswrapper[4751]: I0227 17:14:00.151893 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbd1eb54-185b-4132-b7ea-f6ce2688c71c" containerName="oc" Feb 27 17:14:00 crc kubenswrapper[4751]: I0227 17:14:00.152078 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="bbd1eb54-185b-4132-b7ea-f6ce2688c71c" containerName="oc" Feb 27 17:14:00 crc kubenswrapper[4751]: I0227 17:14:00.152727 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536874-w4nmg" Feb 27 17:14:00 crc kubenswrapper[4751]: I0227 17:14:00.159142 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:14:00 crc kubenswrapper[4751]: I0227 17:14:00.159491 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:14:00 crc kubenswrapper[4751]: I0227 17:14:00.159542 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:14:00 crc kubenswrapper[4751]: I0227 17:14:00.178571 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536874-w4nmg"] Feb 27 17:14:00 crc kubenswrapper[4751]: I0227 17:14:00.267241 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlgjd\" (UniqueName: \"kubernetes.io/projected/62c6c6bd-82b4-4b5a-88d7-ebcbf999f3b5-kube-api-access-rlgjd\") pod \"auto-csr-approver-29536874-w4nmg\" (UID: \"62c6c6bd-82b4-4b5a-88d7-ebcbf999f3b5\") " pod="openshift-infra/auto-csr-approver-29536874-w4nmg" Feb 27 17:14:00 crc kubenswrapper[4751]: I0227 17:14:00.368988 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlgjd\" (UniqueName: \"kubernetes.io/projected/62c6c6bd-82b4-4b5a-88d7-ebcbf999f3b5-kube-api-access-rlgjd\") pod \"auto-csr-approver-29536874-w4nmg\" (UID: \"62c6c6bd-82b4-4b5a-88d7-ebcbf999f3b5\") " pod="openshift-infra/auto-csr-approver-29536874-w4nmg" Feb 27 17:14:00 crc kubenswrapper[4751]: I0227 17:14:00.393785 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rlgjd\" (UniqueName: \"kubernetes.io/projected/62c6c6bd-82b4-4b5a-88d7-ebcbf999f3b5-kube-api-access-rlgjd\") pod \"auto-csr-approver-29536874-w4nmg\" (UID: \"62c6c6bd-82b4-4b5a-88d7-ebcbf999f3b5\") " pod="openshift-infra/auto-csr-approver-29536874-w4nmg" Feb 27 17:14:00 crc kubenswrapper[4751]: I0227 17:14:00.470450 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536874-w4nmg" Feb 27 17:14:00 crc kubenswrapper[4751]: I0227 17:14:00.852206 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536874-w4nmg"] Feb 27 17:14:00 crc kubenswrapper[4751]: W0227 17:14:00.861084 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod62c6c6bd_82b4_4b5a_88d7_ebcbf999f3b5.slice/crio-3f06ccac933af5a1718daf130971e577f1eeb6b7defb9418a4a2121071a83ebe WatchSource:0}: Error finding container 3f06ccac933af5a1718daf130971e577f1eeb6b7defb9418a4a2121071a83ebe: Status 404 returned error can't find the container with id 3f06ccac933af5a1718daf130971e577f1eeb6b7defb9418a4a2121071a83ebe Feb 27 17:14:01 crc kubenswrapper[4751]: I0227 17:14:01.828285 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536874-w4nmg" event={"ID":"62c6c6bd-82b4-4b5a-88d7-ebcbf999f3b5","Type":"ContainerStarted","Data":"3f06ccac933af5a1718daf130971e577f1eeb6b7defb9418a4a2121071a83ebe"} Feb 27 17:14:02 crc kubenswrapper[4751]: I0227 17:14:02.838145 4751 generic.go:334] "Generic (PLEG): container finished" podID="62c6c6bd-82b4-4b5a-88d7-ebcbf999f3b5" containerID="033a86dfe1bf4c9c9edae194f889398cd74163f12d43e5d96a63bf9521b1dcef" exitCode=0 Feb 27 17:14:02 crc kubenswrapper[4751]: I0227 17:14:02.838253 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536874-w4nmg" event={"ID":"62c6c6bd-82b4-4b5a-88d7-ebcbf999f3b5","Type":"ContainerDied","Data":"033a86dfe1bf4c9c9edae194f889398cd74163f12d43e5d96a63bf9521b1dcef"} Feb 27 17:14:04 crc kubenswrapper[4751]: I0227 17:14:04.192429 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536874-w4nmg" Feb 27 17:14:04 crc kubenswrapper[4751]: I0227 17:14:04.348646 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rlgjd\" (UniqueName: \"kubernetes.io/projected/62c6c6bd-82b4-4b5a-88d7-ebcbf999f3b5-kube-api-access-rlgjd\") pod \"62c6c6bd-82b4-4b5a-88d7-ebcbf999f3b5\" (UID: \"62c6c6bd-82b4-4b5a-88d7-ebcbf999f3b5\") " Feb 27 17:14:04 crc kubenswrapper[4751]: I0227 17:14:04.353426 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62c6c6bd-82b4-4b5a-88d7-ebcbf999f3b5-kube-api-access-rlgjd" (OuterVolumeSpecName: "kube-api-access-rlgjd") pod "62c6c6bd-82b4-4b5a-88d7-ebcbf999f3b5" (UID: "62c6c6bd-82b4-4b5a-88d7-ebcbf999f3b5"). InnerVolumeSpecName "kube-api-access-rlgjd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:14:04 crc kubenswrapper[4751]: I0227 17:14:04.450535 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rlgjd\" (UniqueName: \"kubernetes.io/projected/62c6c6bd-82b4-4b5a-88d7-ebcbf999f3b5-kube-api-access-rlgjd\") on node \"crc\" DevicePath \"\"" Feb 27 17:14:04 crc kubenswrapper[4751]: I0227 17:14:04.854508 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536874-w4nmg" event={"ID":"62c6c6bd-82b4-4b5a-88d7-ebcbf999f3b5","Type":"ContainerDied","Data":"3f06ccac933af5a1718daf130971e577f1eeb6b7defb9418a4a2121071a83ebe"} Feb 27 17:14:04 crc kubenswrapper[4751]: I0227 17:14:04.854823 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3f06ccac933af5a1718daf130971e577f1eeb6b7defb9418a4a2121071a83ebe" Feb 27 17:14:04 crc kubenswrapper[4751]: I0227 17:14:04.854605 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536874-w4nmg" Feb 27 17:14:05 crc kubenswrapper[4751]: I0227 17:14:05.275481 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536868-4s685"] Feb 27 17:14:05 crc kubenswrapper[4751]: I0227 17:14:05.282000 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536868-4s685"] Feb 27 17:14:06 crc kubenswrapper[4751]: I0227 17:14:06.536836 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="282476e3-2137-4ef2-991e-a155758077b8" path="/var/lib/kubelet/pods/282476e3-2137-4ef2-991e-a155758077b8/volumes" Feb 27 17:14:17 crc kubenswrapper[4751]: I0227 17:14:17.115141 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-hcxdr"] Feb 27 17:14:17 crc kubenswrapper[4751]: E0227 17:14:17.116052 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62c6c6bd-82b4-4b5a-88d7-ebcbf999f3b5" containerName="oc" Feb 27 17:14:17 crc kubenswrapper[4751]: I0227 17:14:17.116067 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="62c6c6bd-82b4-4b5a-88d7-ebcbf999f3b5" containerName="oc" Feb 27 17:14:17 crc kubenswrapper[4751]: I0227 17:14:17.116262 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="62c6c6bd-82b4-4b5a-88d7-ebcbf999f3b5" containerName="oc" Feb 27 17:14:17 crc kubenswrapper[4751]: I0227 17:14:17.117687 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hcxdr" Feb 27 17:14:17 crc kubenswrapper[4751]: I0227 17:14:17.133594 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hcxdr"] Feb 27 17:14:17 crc kubenswrapper[4751]: I0227 17:14:17.146945 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4rlx\" (UniqueName: \"kubernetes.io/projected/93d25b2e-4a0e-4967-8ad9-a62381925ca3-kube-api-access-b4rlx\") pod \"redhat-marketplace-hcxdr\" (UID: \"93d25b2e-4a0e-4967-8ad9-a62381925ca3\") " pod="openshift-marketplace/redhat-marketplace-hcxdr" Feb 27 17:14:17 crc kubenswrapper[4751]: I0227 17:14:17.147023 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93d25b2e-4a0e-4967-8ad9-a62381925ca3-catalog-content\") pod \"redhat-marketplace-hcxdr\" (UID: \"93d25b2e-4a0e-4967-8ad9-a62381925ca3\") " pod="openshift-marketplace/redhat-marketplace-hcxdr" Feb 27 17:14:17 crc kubenswrapper[4751]: I0227 17:14:17.147055 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93d25b2e-4a0e-4967-8ad9-a62381925ca3-utilities\") pod \"redhat-marketplace-hcxdr\" (UID: \"93d25b2e-4a0e-4967-8ad9-a62381925ca3\") " pod="openshift-marketplace/redhat-marketplace-hcxdr" Feb 27 17:14:17 crc kubenswrapper[4751]: I0227 17:14:17.249151 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4rlx\" (UniqueName: \"kubernetes.io/projected/93d25b2e-4a0e-4967-8ad9-a62381925ca3-kube-api-access-b4rlx\") pod \"redhat-marketplace-hcxdr\" (UID: \"93d25b2e-4a0e-4967-8ad9-a62381925ca3\") " pod="openshift-marketplace/redhat-marketplace-hcxdr" Feb 27 17:14:17 crc kubenswrapper[4751]: I0227 17:14:17.249283 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93d25b2e-4a0e-4967-8ad9-a62381925ca3-catalog-content\") pod \"redhat-marketplace-hcxdr\" (UID: \"93d25b2e-4a0e-4967-8ad9-a62381925ca3\") " pod="openshift-marketplace/redhat-marketplace-hcxdr" Feb 27 17:14:17 crc kubenswrapper[4751]: I0227 17:14:17.249328 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93d25b2e-4a0e-4967-8ad9-a62381925ca3-utilities\") pod \"redhat-marketplace-hcxdr\" (UID: \"93d25b2e-4a0e-4967-8ad9-a62381925ca3\") " pod="openshift-marketplace/redhat-marketplace-hcxdr" Feb 27 17:14:17 crc kubenswrapper[4751]: I0227 17:14:17.249973 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93d25b2e-4a0e-4967-8ad9-a62381925ca3-utilities\") pod \"redhat-marketplace-hcxdr\" (UID: \"93d25b2e-4a0e-4967-8ad9-a62381925ca3\") " pod="openshift-marketplace/redhat-marketplace-hcxdr" Feb 27 17:14:17 crc kubenswrapper[4751]: I0227 17:14:17.250284 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93d25b2e-4a0e-4967-8ad9-a62381925ca3-catalog-content\") pod \"redhat-marketplace-hcxdr\" (UID: \"93d25b2e-4a0e-4967-8ad9-a62381925ca3\") " pod="openshift-marketplace/redhat-marketplace-hcxdr" Feb 27 17:14:17 crc kubenswrapper[4751]: I0227 17:14:17.273169 4751 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-b4rlx\" (UniqueName: \"kubernetes.io/projected/93d25b2e-4a0e-4967-8ad9-a62381925ca3-kube-api-access-b4rlx\") pod \"redhat-marketplace-hcxdr\" (UID: \"93d25b2e-4a0e-4967-8ad9-a62381925ca3\") " pod="openshift-marketplace/redhat-marketplace-hcxdr" Feb 27 17:14:17 crc kubenswrapper[4751]: I0227 17:14:17.478519 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hcxdr" Feb 27 17:14:17 crc kubenswrapper[4751]: I0227 17:14:17.939549 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hcxdr"] Feb 27 17:14:17 crc kubenswrapper[4751]: I0227 17:14:17.962217 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hcxdr" event={"ID":"93d25b2e-4a0e-4967-8ad9-a62381925ca3","Type":"ContainerStarted","Data":"b32f6560c05a1cd598758a6182ec8d32cc0b85ccc5c0de28be6e2e3199734768"} Feb 27 17:14:18 crc kubenswrapper[4751]: I0227 17:14:18.970995 4751 generic.go:334] "Generic (PLEG): container finished" podID="93d25b2e-4a0e-4967-8ad9-a62381925ca3" containerID="c5d284cc588ddc2edbb01b70c56829e63f520995ff31057b1cba1350771d2bc2" exitCode=0 Feb 27 17:14:18 crc kubenswrapper[4751]: I0227 17:14:18.971138 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hcxdr" event={"ID":"93d25b2e-4a0e-4967-8ad9-a62381925ca3","Type":"ContainerDied","Data":"c5d284cc588ddc2edbb01b70c56829e63f520995ff31057b1cba1350771d2bc2"} Feb 27 17:14:19 crc kubenswrapper[4751]: I0227 17:14:19.979810 4751 generic.go:334] "Generic (PLEG): container finished" podID="93d25b2e-4a0e-4967-8ad9-a62381925ca3" containerID="e141fdc5c6f0e7d7590f767544777f78ba287b974ea991efab13e76792272c49" exitCode=0 Feb 27 17:14:19 crc kubenswrapper[4751]: I0227 17:14:19.979916 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hcxdr" event={"ID":"93d25b2e-4a0e-4967-8ad9-a62381925ca3","Type":"ContainerDied","Data":"e141fdc5c6f0e7d7590f767544777f78ba287b974ea991efab13e76792272c49"} Feb 27 17:14:20 crc kubenswrapper[4751]: I0227 17:14:20.987376 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hcxdr" event={"ID":"93d25b2e-4a0e-4967-8ad9-a62381925ca3","Type":"ContainerStarted","Data":"a176a636320fd42e2317e16488a5d49c41723c809b30cb4f97ce28ee889b8ea2"} Feb 27 17:14:21 crc kubenswrapper[4751]: I0227 17:14:21.012928 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-hcxdr" podStartSLOduration=2.570212004 podStartE2EDuration="4.012872475s" podCreationTimestamp="2026-02-27 17:14:17 +0000 UTC" firstStartedPulling="2026-02-27 17:14:18.972735005 +0000 UTC m=+3021.119749472" lastFinishedPulling="2026-02-27 17:14:20.415395476 +0000 UTC m=+3022.562409943" observedRunningTime="2026-02-27 17:14:21.009435905 +0000 UTC m=+3023.156450352" watchObservedRunningTime="2026-02-27 17:14:21.012872475 +0000 UTC m=+3023.159886962" Feb 27 17:14:27 crc kubenswrapper[4751]: I0227 17:14:27.479306 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-hcxdr" Feb 27 17:14:27 crc kubenswrapper[4751]: I0227 17:14:27.480182 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-hcxdr" Feb 27 17:14:27 crc kubenswrapper[4751]: I0227 17:14:27.536698 4751 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-hcxdr" Feb 27 17:14:28 crc kubenswrapper[4751]: I0227 17:14:28.102422 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-hcxdr" Feb 27 17:14:28 crc kubenswrapper[4751]: I0227 17:14:28.171182 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hcxdr"] Feb 27 17:14:30 crc kubenswrapper[4751]: I0227 17:14:30.062716 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-hcxdr" podUID="93d25b2e-4a0e-4967-8ad9-a62381925ca3" containerName="registry-server" containerID="cri-o://a176a636320fd42e2317e16488a5d49c41723c809b30cb4f97ce28ee889b8ea2" gracePeriod=2 Feb 27 17:14:30 crc kubenswrapper[4751]: I0227 17:14:30.993873 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hcxdr" Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.114861 4751 generic.go:334] "Generic (PLEG): container finished" podID="93d25b2e-4a0e-4967-8ad9-a62381925ca3" containerID="a176a636320fd42e2317e16488a5d49c41723c809b30cb4f97ce28ee889b8ea2" exitCode=0 Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.114907 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hcxdr" event={"ID":"93d25b2e-4a0e-4967-8ad9-a62381925ca3","Type":"ContainerDied","Data":"a176a636320fd42e2317e16488a5d49c41723c809b30cb4f97ce28ee889b8ea2"} Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.114935 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hcxdr" event={"ID":"93d25b2e-4a0e-4967-8ad9-a62381925ca3","Type":"ContainerDied","Data":"b32f6560c05a1cd598758a6182ec8d32cc0b85ccc5c0de28be6e2e3199734768"} Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.114950 4751 scope.go:117] "RemoveContainer" containerID="a176a636320fd42e2317e16488a5d49c41723c809b30cb4f97ce28ee889b8ea2" Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.114997 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hcxdr" Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.163014 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b4rlx\" (UniqueName: \"kubernetes.io/projected/93d25b2e-4a0e-4967-8ad9-a62381925ca3-kube-api-access-b4rlx\") pod \"93d25b2e-4a0e-4967-8ad9-a62381925ca3\" (UID: \"93d25b2e-4a0e-4967-8ad9-a62381925ca3\") " Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.163093 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93d25b2e-4a0e-4967-8ad9-a62381925ca3-utilities\") pod \"93d25b2e-4a0e-4967-8ad9-a62381925ca3\" (UID: \"93d25b2e-4a0e-4967-8ad9-a62381925ca3\") " Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.163237 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93d25b2e-4a0e-4967-8ad9-a62381925ca3-catalog-content\") pod \"93d25b2e-4a0e-4967-8ad9-a62381925ca3\" (UID: \"93d25b2e-4a0e-4967-8ad9-a62381925ca3\") " Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.164421 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93d25b2e-4a0e-4967-8ad9-a62381925ca3-utilities" (OuterVolumeSpecName: "utilities") pod "93d25b2e-4a0e-4967-8ad9-a62381925ca3" (UID: "93d25b2e-4a0e-4967-8ad9-a62381925ca3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.170137 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93d25b2e-4a0e-4967-8ad9-a62381925ca3-kube-api-access-b4rlx" (OuterVolumeSpecName: "kube-api-access-b4rlx") pod "93d25b2e-4a0e-4967-8ad9-a62381925ca3" (UID: "93d25b2e-4a0e-4967-8ad9-a62381925ca3"). InnerVolumeSpecName "kube-api-access-b4rlx". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.190789 4751 scope.go:117] "RemoveContainer" containerID="e141fdc5c6f0e7d7590f767544777f78ba287b974ea991efab13e76792272c49" Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.190846 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93d25b2e-4a0e-4967-8ad9-a62381925ca3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "93d25b2e-4a0e-4967-8ad9-a62381925ca3" (UID: "93d25b2e-4a0e-4967-8ad9-a62381925ca3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.206858 4751 scope.go:117] "RemoveContainer" containerID="c5d284cc588ddc2edbb01b70c56829e63f520995ff31057b1cba1350771d2bc2" Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.231817 4751 scope.go:117] "RemoveContainer" containerID="a176a636320fd42e2317e16488a5d49c41723c809b30cb4f97ce28ee889b8ea2" Feb 27 17:14:31 crc kubenswrapper[4751]: E0227 17:14:31.232269 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a176a636320fd42e2317e16488a5d49c41723c809b30cb4f97ce28ee889b8ea2\": container with ID starting with a176a636320fd42e2317e16488a5d49c41723c809b30cb4f97ce28ee889b8ea2 not found: ID does not exist" containerID="a176a636320fd42e2317e16488a5d49c41723c809b30cb4f97ce28ee889b8ea2" Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.232309 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a176a636320fd42e2317e16488a5d49c41723c809b30cb4f97ce28ee889b8ea2"} err="failed to get container status \"a176a636320fd42e2317e16488a5d49c41723c809b30cb4f97ce28ee889b8ea2\": rpc error: code = NotFound desc = could not find container \"a176a636320fd42e2317e16488a5d49c41723c809b30cb4f97ce28ee889b8ea2\": container with ID starting with a176a636320fd42e2317e16488a5d49c41723c809b30cb4f97ce28ee889b8ea2 not found: ID does not exist" Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.232335 4751 scope.go:117] "RemoveContainer" containerID="e141fdc5c6f0e7d7590f767544777f78ba287b974ea991efab13e76792272c49" Feb 27 17:14:31 crc kubenswrapper[4751]: E0227 17:14:31.232697 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e141fdc5c6f0e7d7590f767544777f78ba287b974ea991efab13e76792272c49\": container with ID starting with e141fdc5c6f0e7d7590f767544777f78ba287b974ea991efab13e76792272c49 not found: ID does not exist" containerID="e141fdc5c6f0e7d7590f767544777f78ba287b974ea991efab13e76792272c49" Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.232733 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e141fdc5c6f0e7d7590f767544777f78ba287b974ea991efab13e76792272c49"} err="failed to get container status \"e141fdc5c6f0e7d7590f767544777f78ba287b974ea991efab13e76792272c49\": rpc error: code = NotFound desc = could not find container \"e141fdc5c6f0e7d7590f767544777f78ba287b974ea991efab13e76792272c49\": container with ID starting with e141fdc5c6f0e7d7590f767544777f78ba287b974ea991efab13e76792272c49 not found: ID does not exist" Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.232754 4751 scope.go:117] "RemoveContainer" containerID="c5d284cc588ddc2edbb01b70c56829e63f520995ff31057b1cba1350771d2bc2" Feb 27 17:14:31 crc kubenswrapper[4751]: E0227 17:14:31.236028 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5d284cc588ddc2edbb01b70c56829e63f520995ff31057b1cba1350771d2bc2\": container with ID starting with c5d284cc588ddc2edbb01b70c56829e63f520995ff31057b1cba1350771d2bc2 not found: ID does not exist" containerID="c5d284cc588ddc2edbb01b70c56829e63f520995ff31057b1cba1350771d2bc2" Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.236054 4751 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"c5d284cc588ddc2edbb01b70c56829e63f520995ff31057b1cba1350771d2bc2"} err="failed to get container status \"c5d284cc588ddc2edbb01b70c56829e63f520995ff31057b1cba1350771d2bc2\": rpc error: code = NotFound desc = could not find container \"c5d284cc588ddc2edbb01b70c56829e63f520995ff31057b1cba1350771d2bc2\": container with ID starting with c5d284cc588ddc2edbb01b70c56829e63f520995ff31057b1cba1350771d2bc2 not found: ID does not exist" Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.265341 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93d25b2e-4a0e-4967-8ad9-a62381925ca3-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.265382 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b4rlx\" (UniqueName: \"kubernetes.io/projected/93d25b2e-4a0e-4967-8ad9-a62381925ca3-kube-api-access-b4rlx\") on node \"crc\" DevicePath \"\"" Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.265449 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93d25b2e-4a0e-4967-8ad9-a62381925ca3-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.455557 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hcxdr"] Feb 27 17:14:31 crc kubenswrapper[4751]: I0227 17:14:31.463073 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-hcxdr"] Feb 27 17:14:32 crc kubenswrapper[4751]: I0227 17:14:32.536059 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93d25b2e-4a0e-4967-8ad9-a62381925ca3" path="/var/lib/kubelet/pods/93d25b2e-4a0e-4967-8ad9-a62381925ca3/volumes" Feb 27 17:14:49 crc kubenswrapper[4751]: I0227 17:14:49.610958 4751 scope.go:117] "RemoveContainer" containerID="7dacc719a708791b905544cf062d6e36e74e884e55e9b0902e313c7c8e51d2ae" Feb 27 17:15:00 crc kubenswrapper[4751]: I0227 17:15:00.171185 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536875-6kwl2"] Feb 27 17:15:00 crc kubenswrapper[4751]: E0227 17:15:00.173964 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93d25b2e-4a0e-4967-8ad9-a62381925ca3" containerName="extract-utilities" Feb 27 17:15:00 crc kubenswrapper[4751]: I0227 17:15:00.174113 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="93d25b2e-4a0e-4967-8ad9-a62381925ca3" containerName="extract-utilities" Feb 27 17:15:00 crc kubenswrapper[4751]: E0227 17:15:00.174264 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93d25b2e-4a0e-4967-8ad9-a62381925ca3" containerName="extract-content" Feb 27 17:15:00 crc kubenswrapper[4751]: I0227 17:15:00.174385 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="93d25b2e-4a0e-4967-8ad9-a62381925ca3" containerName="extract-content" Feb 27 17:15:00 crc kubenswrapper[4751]: E0227 17:15:00.174564 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93d25b2e-4a0e-4967-8ad9-a62381925ca3" containerName="registry-server" Feb 27 17:15:00 crc kubenswrapper[4751]: I0227 17:15:00.174683 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="93d25b2e-4a0e-4967-8ad9-a62381925ca3" containerName="registry-server" Feb 27 17:15:00 crc kubenswrapper[4751]: I0227 17:15:00.175071 4751 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="93d25b2e-4a0e-4967-8ad9-a62381925ca3" containerName="registry-server" Feb 27 17:15:00 crc kubenswrapper[4751]: I0227 17:15:00.176020 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536875-6kwl2" Feb 27 17:15:00 crc kubenswrapper[4751]: I0227 17:15:00.178352 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 27 17:15:00 crc kubenswrapper[4751]: I0227 17:15:00.179300 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536875-6kwl2"] Feb 27 17:15:00 crc kubenswrapper[4751]: I0227 17:15:00.180516 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 27 17:15:00 crc kubenswrapper[4751]: I0227 17:15:00.344170 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b00aca05-48fe-45cb-a464-56fe49532233-config-volume\") pod \"collect-profiles-29536875-6kwl2\" (UID: \"b00aca05-48fe-45cb-a464-56fe49532233\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536875-6kwl2" Feb 27 17:15:00 crc kubenswrapper[4751]: I0227 17:15:00.344931 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhwck\" (UniqueName: \"kubernetes.io/projected/b00aca05-48fe-45cb-a464-56fe49532233-kube-api-access-qhwck\") pod \"collect-profiles-29536875-6kwl2\" (UID: \"b00aca05-48fe-45cb-a464-56fe49532233\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536875-6kwl2" Feb 27 17:15:00 crc kubenswrapper[4751]: I0227 17:15:00.345095 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b00aca05-48fe-45cb-a464-56fe49532233-secret-volume\") pod \"collect-profiles-29536875-6kwl2\" (UID: \"b00aca05-48fe-45cb-a464-56fe49532233\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536875-6kwl2" Feb 27 17:15:00 crc kubenswrapper[4751]: I0227 17:15:00.447021 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b00aca05-48fe-45cb-a464-56fe49532233-config-volume\") pod \"collect-profiles-29536875-6kwl2\" (UID: \"b00aca05-48fe-45cb-a464-56fe49532233\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536875-6kwl2" Feb 27 17:15:00 crc kubenswrapper[4751]: I0227 17:15:00.447190 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhwck\" (UniqueName: \"kubernetes.io/projected/b00aca05-48fe-45cb-a464-56fe49532233-kube-api-access-qhwck\") pod \"collect-profiles-29536875-6kwl2\" (UID: \"b00aca05-48fe-45cb-a464-56fe49532233\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536875-6kwl2" Feb 27 17:15:00 crc kubenswrapper[4751]: I0227 17:15:00.447265 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b00aca05-48fe-45cb-a464-56fe49532233-secret-volume\") pod \"collect-profiles-29536875-6kwl2\" (UID: \"b00aca05-48fe-45cb-a464-56fe49532233\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536875-6kwl2" Feb 27 17:15:00 crc kubenswrapper[4751]: I0227 
17:15:00.447909 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b00aca05-48fe-45cb-a464-56fe49532233-config-volume\") pod \"collect-profiles-29536875-6kwl2\" (UID: \"b00aca05-48fe-45cb-a464-56fe49532233\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536875-6kwl2" Feb 27 17:15:00 crc kubenswrapper[4751]: I0227 17:15:00.456035 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b00aca05-48fe-45cb-a464-56fe49532233-secret-volume\") pod \"collect-profiles-29536875-6kwl2\" (UID: \"b00aca05-48fe-45cb-a464-56fe49532233\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536875-6kwl2" Feb 27 17:15:00 crc kubenswrapper[4751]: I0227 17:15:00.479726 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhwck\" (UniqueName: \"kubernetes.io/projected/b00aca05-48fe-45cb-a464-56fe49532233-kube-api-access-qhwck\") pod \"collect-profiles-29536875-6kwl2\" (UID: \"b00aca05-48fe-45cb-a464-56fe49532233\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536875-6kwl2" Feb 27 17:15:00 crc kubenswrapper[4751]: I0227 17:15:00.501570 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536875-6kwl2" Feb 27 17:15:00 crc kubenswrapper[4751]: I0227 17:15:00.732162 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536875-6kwl2"] Feb 27 17:15:01 crc kubenswrapper[4751]: I0227 17:15:01.397324 4751 generic.go:334] "Generic (PLEG): container finished" podID="b00aca05-48fe-45cb-a464-56fe49532233" containerID="7a2e2de4029336a2d8820b35c6e297f4713df17516ddaf9d023d6e6b6f38a4a3" exitCode=0 Feb 27 17:15:01 crc kubenswrapper[4751]: I0227 17:15:01.397449 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536875-6kwl2" event={"ID":"b00aca05-48fe-45cb-a464-56fe49532233","Type":"ContainerDied","Data":"7a2e2de4029336a2d8820b35c6e297f4713df17516ddaf9d023d6e6b6f38a4a3"} Feb 27 17:15:01 crc kubenswrapper[4751]: I0227 17:15:01.397723 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536875-6kwl2" event={"ID":"b00aca05-48fe-45cb-a464-56fe49532233","Type":"ContainerStarted","Data":"b536a522f4f20835c46f39694399af122b7dd1a9da0a9dbd04437630020cf40f"} Feb 27 17:15:02 crc kubenswrapper[4751]: I0227 17:15:02.699803 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536875-6kwl2" Feb 27 17:15:02 crc kubenswrapper[4751]: I0227 17:15:02.707456 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qhwck\" (UniqueName: \"kubernetes.io/projected/b00aca05-48fe-45cb-a464-56fe49532233-kube-api-access-qhwck\") pod \"b00aca05-48fe-45cb-a464-56fe49532233\" (UID: \"b00aca05-48fe-45cb-a464-56fe49532233\") " Feb 27 17:15:02 crc kubenswrapper[4751]: I0227 17:15:02.707619 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b00aca05-48fe-45cb-a464-56fe49532233-secret-volume\") pod \"b00aca05-48fe-45cb-a464-56fe49532233\" (UID: \"b00aca05-48fe-45cb-a464-56fe49532233\") " Feb 27 17:15:02 crc kubenswrapper[4751]: I0227 17:15:02.707850 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b00aca05-48fe-45cb-a464-56fe49532233-config-volume\") pod \"b00aca05-48fe-45cb-a464-56fe49532233\" (UID: \"b00aca05-48fe-45cb-a464-56fe49532233\") " Feb 27 17:15:02 crc kubenswrapper[4751]: I0227 17:15:02.711368 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b00aca05-48fe-45cb-a464-56fe49532233-config-volume" (OuterVolumeSpecName: "config-volume") pod "b00aca05-48fe-45cb-a464-56fe49532233" (UID: "b00aca05-48fe-45cb-a464-56fe49532233"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 17:15:02 crc kubenswrapper[4751]: I0227 17:15:02.712904 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b00aca05-48fe-45cb-a464-56fe49532233-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b00aca05-48fe-45cb-a464-56fe49532233" (UID: "b00aca05-48fe-45cb-a464-56fe49532233"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 17:15:02 crc kubenswrapper[4751]: I0227 17:15:02.713880 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b00aca05-48fe-45cb-a464-56fe49532233-kube-api-access-qhwck" (OuterVolumeSpecName: "kube-api-access-qhwck") pod "b00aca05-48fe-45cb-a464-56fe49532233" (UID: "b00aca05-48fe-45cb-a464-56fe49532233"). InnerVolumeSpecName "kube-api-access-qhwck". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:15:02 crc kubenswrapper[4751]: I0227 17:15:02.809848 4751 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b00aca05-48fe-45cb-a464-56fe49532233-config-volume\") on node \"crc\" DevicePath \"\"" Feb 27 17:15:02 crc kubenswrapper[4751]: I0227 17:15:02.809876 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qhwck\" (UniqueName: \"kubernetes.io/projected/b00aca05-48fe-45cb-a464-56fe49532233-kube-api-access-qhwck\") on node \"crc\" DevicePath \"\"" Feb 27 17:15:02 crc kubenswrapper[4751]: I0227 17:15:02.809891 4751 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b00aca05-48fe-45cb-a464-56fe49532233-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 27 17:15:03 crc kubenswrapper[4751]: I0227 17:15:03.414166 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536875-6kwl2" event={"ID":"b00aca05-48fe-45cb-a464-56fe49532233","Type":"ContainerDied","Data":"b536a522f4f20835c46f39694399af122b7dd1a9da0a9dbd04437630020cf40f"} Feb 27 17:15:03 crc kubenswrapper[4751]: I0227 17:15:03.414227 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b536a522f4f20835c46f39694399af122b7dd1a9da0a9dbd04437630020cf40f" Feb 27 17:15:03 crc kubenswrapper[4751]: I0227 17:15:03.414256 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536875-6kwl2" Feb 27 17:15:03 crc kubenswrapper[4751]: I0227 17:15:03.807351 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536830-pggzf"] Feb 27 17:15:03 crc kubenswrapper[4751]: I0227 17:15:03.817386 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536830-pggzf"] Feb 27 17:15:04 crc kubenswrapper[4751]: I0227 17:15:04.537995 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="193bf22c-1f33-4d04-8688-f67aef4fc77e" path="/var/lib/kubelet/pods/193bf22c-1f33-4d04-8688-f67aef4fc77e/volumes" Feb 27 17:15:49 crc kubenswrapper[4751]: I0227 17:15:49.745443 4751 scope.go:117] "RemoveContainer" containerID="3e87f972a61a3a942f6e35945ca33d4311334bedb4aa43d97c8816350a0556d8" Feb 27 17:16:00 crc kubenswrapper[4751]: I0227 17:16:00.169931 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536876-5k9lj"] Feb 27 17:16:00 crc kubenswrapper[4751]: E0227 17:16:00.171376 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b00aca05-48fe-45cb-a464-56fe49532233" containerName="collect-profiles" Feb 27 17:16:00 crc kubenswrapper[4751]: I0227 17:16:00.171471 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b00aca05-48fe-45cb-a464-56fe49532233" containerName="collect-profiles" Feb 27 17:16:00 crc kubenswrapper[4751]: I0227 17:16:00.171833 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="b00aca05-48fe-45cb-a464-56fe49532233" containerName="collect-profiles" Feb 27 17:16:00 crc kubenswrapper[4751]: I0227 17:16:00.172762 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536876-5k9lj" Feb 27 17:16:00 crc kubenswrapper[4751]: I0227 17:16:00.177073 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:16:00 crc kubenswrapper[4751]: I0227 17:16:00.177088 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:16:00 crc kubenswrapper[4751]: I0227 17:16:00.177199 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:16:00 crc kubenswrapper[4751]: I0227 17:16:00.188871 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536876-5k9lj"] Feb 27 17:16:00 crc kubenswrapper[4751]: I0227 17:16:00.338946 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkh46\" (UniqueName: \"kubernetes.io/projected/4003ba5b-5827-420e-860b-c76b90a7dbea-kube-api-access-gkh46\") pod \"auto-csr-approver-29536876-5k9lj\" (UID: \"4003ba5b-5827-420e-860b-c76b90a7dbea\") " pod="openshift-infra/auto-csr-approver-29536876-5k9lj" Feb 27 17:16:00 crc kubenswrapper[4751]: I0227 17:16:00.440639 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkh46\" (UniqueName: \"kubernetes.io/projected/4003ba5b-5827-420e-860b-c76b90a7dbea-kube-api-access-gkh46\") pod \"auto-csr-approver-29536876-5k9lj\" (UID: \"4003ba5b-5827-420e-860b-c76b90a7dbea\") " pod="openshift-infra/auto-csr-approver-29536876-5k9lj" Feb 27 17:16:00 crc kubenswrapper[4751]: I0227 17:16:00.475602 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkh46\" (UniqueName: \"kubernetes.io/projected/4003ba5b-5827-420e-860b-c76b90a7dbea-kube-api-access-gkh46\") pod \"auto-csr-approver-29536876-5k9lj\" (UID: \"4003ba5b-5827-420e-860b-c76b90a7dbea\") " pod="openshift-infra/auto-csr-approver-29536876-5k9lj" Feb 27 17:16:00 crc kubenswrapper[4751]: I0227 17:16:00.512135 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536876-5k9lj" Feb 27 17:16:00 crc kubenswrapper[4751]: I0227 17:16:00.787659 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536876-5k9lj"] Feb 27 17:16:00 crc kubenswrapper[4751]: I0227 17:16:00.946111 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536876-5k9lj" event={"ID":"4003ba5b-5827-420e-860b-c76b90a7dbea","Type":"ContainerStarted","Data":"b8b970f48e4758e30419a38279c533ee7b294747b8562429117695af87edd3d6"} Feb 27 17:16:02 crc kubenswrapper[4751]: I0227 17:16:02.963934 4751 generic.go:334] "Generic (PLEG): container finished" podID="4003ba5b-5827-420e-860b-c76b90a7dbea" containerID="addc620d0f9076d94cf5365c143d0d2ded6c639b8237a41a1ff185ad37547562" exitCode=0 Feb 27 17:16:02 crc kubenswrapper[4751]: I0227 17:16:02.964052 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536876-5k9lj" event={"ID":"4003ba5b-5827-420e-860b-c76b90a7dbea","Type":"ContainerDied","Data":"addc620d0f9076d94cf5365c143d0d2ded6c639b8237a41a1ff185ad37547562"} Feb 27 17:16:04 crc kubenswrapper[4751]: I0227 17:16:04.367065 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536876-5k9lj" Feb 27 17:16:04 crc kubenswrapper[4751]: I0227 17:16:04.509212 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gkh46\" (UniqueName: \"kubernetes.io/projected/4003ba5b-5827-420e-860b-c76b90a7dbea-kube-api-access-gkh46\") pod \"4003ba5b-5827-420e-860b-c76b90a7dbea\" (UID: \"4003ba5b-5827-420e-860b-c76b90a7dbea\") " Feb 27 17:16:04 crc kubenswrapper[4751]: I0227 17:16:04.518645 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4003ba5b-5827-420e-860b-c76b90a7dbea-kube-api-access-gkh46" (OuterVolumeSpecName: "kube-api-access-gkh46") pod "4003ba5b-5827-420e-860b-c76b90a7dbea" (UID: "4003ba5b-5827-420e-860b-c76b90a7dbea"). InnerVolumeSpecName "kube-api-access-gkh46". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:16:04 crc kubenswrapper[4751]: I0227 17:16:04.611361 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gkh46\" (UniqueName: \"kubernetes.io/projected/4003ba5b-5827-420e-860b-c76b90a7dbea-kube-api-access-gkh46\") on node \"crc\" DevicePath \"\"" Feb 27 17:16:04 crc kubenswrapper[4751]: I0227 17:16:04.986870 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536876-5k9lj" event={"ID":"4003ba5b-5827-420e-860b-c76b90a7dbea","Type":"ContainerDied","Data":"b8b970f48e4758e30419a38279c533ee7b294747b8562429117695af87edd3d6"} Feb 27 17:16:04 crc kubenswrapper[4751]: I0227 17:16:04.986946 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b8b970f48e4758e30419a38279c533ee7b294747b8562429117695af87edd3d6" Feb 27 17:16:04 crc kubenswrapper[4751]: I0227 17:16:04.986982 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536876-5k9lj" Feb 27 17:16:05 crc kubenswrapper[4751]: I0227 17:16:05.456100 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536870-bxl78"] Feb 27 17:16:05 crc kubenswrapper[4751]: I0227 17:16:05.465720 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536870-bxl78"] Feb 27 17:16:06 crc kubenswrapper[4751]: I0227 17:16:06.540943 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc8bb46d-9242-4d86-9e94-0bc27aa48ed9" path="/var/lib/kubelet/pods/cc8bb46d-9242-4d86-9e94-0bc27aa48ed9/volumes" Feb 27 17:16:28 crc kubenswrapper[4751]: I0227 17:16:28.920115 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:16:28 crc kubenswrapper[4751]: I0227 17:16:28.920530 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:16:49 crc kubenswrapper[4751]: I0227 17:16:49.822486 4751 scope.go:117] "RemoveContainer" containerID="487721ea50edd67a10b916c78b307bef7669fc1bca2ead0bc111cbd6e01d2ba4" Feb 27 17:16:58 crc kubenswrapper[4751]: I0227 17:16:58.918064 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:16:58 crc kubenswrapper[4751]: I0227 17:16:58.919107 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:17:15 crc kubenswrapper[4751]: I0227 17:17:15.782069 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xxs98"] Feb 27 17:17:15 crc kubenswrapper[4751]: E0227 17:17:15.783240 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4003ba5b-5827-420e-860b-c76b90a7dbea" containerName="oc" Feb 27 17:17:15 crc kubenswrapper[4751]: I0227 17:17:15.783262 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="4003ba5b-5827-420e-860b-c76b90a7dbea" containerName="oc" Feb 27 17:17:15 crc kubenswrapper[4751]: I0227 17:17:15.783552 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="4003ba5b-5827-420e-860b-c76b90a7dbea" containerName="oc" Feb 27 17:17:15 crc kubenswrapper[4751]: I0227 17:17:15.785269 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xxs98" Feb 27 17:17:15 crc kubenswrapper[4751]: I0227 17:17:15.791584 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xxs98"] Feb 27 17:17:15 crc kubenswrapper[4751]: I0227 17:17:15.848349 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aba79b48-6788-4bab-a286-bd53046cc569-utilities\") pod \"community-operators-xxs98\" (UID: \"aba79b48-6788-4bab-a286-bd53046cc569\") " pod="openshift-marketplace/community-operators-xxs98" Feb 27 17:17:15 crc kubenswrapper[4751]: I0227 17:17:15.848529 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrrr2\" (UniqueName: \"kubernetes.io/projected/aba79b48-6788-4bab-a286-bd53046cc569-kube-api-access-lrrr2\") pod \"community-operators-xxs98\" (UID: \"aba79b48-6788-4bab-a286-bd53046cc569\") " pod="openshift-marketplace/community-operators-xxs98" Feb 27 17:17:15 crc kubenswrapper[4751]: I0227 17:17:15.848562 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aba79b48-6788-4bab-a286-bd53046cc569-catalog-content\") pod \"community-operators-xxs98\" (UID: \"aba79b48-6788-4bab-a286-bd53046cc569\") " pod="openshift-marketplace/community-operators-xxs98" Feb 27 17:17:15 crc kubenswrapper[4751]: I0227 17:17:15.949392 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrrr2\" (UniqueName: \"kubernetes.io/projected/aba79b48-6788-4bab-a286-bd53046cc569-kube-api-access-lrrr2\") pod \"community-operators-xxs98\" (UID: \"aba79b48-6788-4bab-a286-bd53046cc569\") " pod="openshift-marketplace/community-operators-xxs98" Feb 27 17:17:15 crc kubenswrapper[4751]: I0227 17:17:15.949450 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aba79b48-6788-4bab-a286-bd53046cc569-catalog-content\") pod \"community-operators-xxs98\" (UID: \"aba79b48-6788-4bab-a286-bd53046cc569\") " pod="openshift-marketplace/community-operators-xxs98" Feb 27 17:17:15 crc kubenswrapper[4751]: I0227 17:17:15.949485 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aba79b48-6788-4bab-a286-bd53046cc569-utilities\") pod \"community-operators-xxs98\" (UID: \"aba79b48-6788-4bab-a286-bd53046cc569\") " pod="openshift-marketplace/community-operators-xxs98" Feb 27 17:17:15 crc kubenswrapper[4751]: I0227 17:17:15.950008 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aba79b48-6788-4bab-a286-bd53046cc569-catalog-content\") pod \"community-operators-xxs98\" (UID: \"aba79b48-6788-4bab-a286-bd53046cc569\") " pod="openshift-marketplace/community-operators-xxs98" Feb 27 17:17:15 crc kubenswrapper[4751]: I0227 17:17:15.950035 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aba79b48-6788-4bab-a286-bd53046cc569-utilities\") pod \"community-operators-xxs98\" (UID: \"aba79b48-6788-4bab-a286-bd53046cc569\") " pod="openshift-marketplace/community-operators-xxs98" Feb 27 17:17:15 crc kubenswrapper[4751]: I0227 17:17:15.974080 4751 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-lrrr2\" (UniqueName: \"kubernetes.io/projected/aba79b48-6788-4bab-a286-bd53046cc569-kube-api-access-lrrr2\") pod \"community-operators-xxs98\" (UID: \"aba79b48-6788-4bab-a286-bd53046cc569\") " pod="openshift-marketplace/community-operators-xxs98" Feb 27 17:17:16 crc kubenswrapper[4751]: I0227 17:17:16.147149 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xxs98" Feb 27 17:17:16 crc kubenswrapper[4751]: I0227 17:17:16.627108 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xxs98"] Feb 27 17:17:16 crc kubenswrapper[4751]: I0227 17:17:16.684561 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xxs98" event={"ID":"aba79b48-6788-4bab-a286-bd53046cc569","Type":"ContainerStarted","Data":"2b12686bf84fadd90e924dd3fa249d41fe59971a6ad58103ce3e112b029aadab"} Feb 27 17:17:17 crc kubenswrapper[4751]: I0227 17:17:17.696829 4751 generic.go:334] "Generic (PLEG): container finished" podID="aba79b48-6788-4bab-a286-bd53046cc569" containerID="17ee1a656052cbb2e528212958a04b9adc0a50ed6f7f3426368cc367b407c5d8" exitCode=0 Feb 27 17:17:17 crc kubenswrapper[4751]: I0227 17:17:17.696912 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xxs98" event={"ID":"aba79b48-6788-4bab-a286-bd53046cc569","Type":"ContainerDied","Data":"17ee1a656052cbb2e528212958a04b9adc0a50ed6f7f3426368cc367b407c5d8"} Feb 27 17:17:17 crc kubenswrapper[4751]: I0227 17:17:17.699454 4751 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 27 17:17:18 crc kubenswrapper[4751]: I0227 17:17:18.708385 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xxs98" event={"ID":"aba79b48-6788-4bab-a286-bd53046cc569","Type":"ContainerStarted","Data":"76fe7ca17b4aa3be991eda75677f48fcfbfdfa02955b851e64219afcaaea828e"} Feb 27 17:17:19 crc kubenswrapper[4751]: I0227 17:17:19.722710 4751 generic.go:334] "Generic (PLEG): container finished" podID="aba79b48-6788-4bab-a286-bd53046cc569" containerID="76fe7ca17b4aa3be991eda75677f48fcfbfdfa02955b851e64219afcaaea828e" exitCode=0 Feb 27 17:17:19 crc kubenswrapper[4751]: I0227 17:17:19.722783 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xxs98" event={"ID":"aba79b48-6788-4bab-a286-bd53046cc569","Type":"ContainerDied","Data":"76fe7ca17b4aa3be991eda75677f48fcfbfdfa02955b851e64219afcaaea828e"} Feb 27 17:17:20 crc kubenswrapper[4751]: I0227 17:17:20.738920 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xxs98" event={"ID":"aba79b48-6788-4bab-a286-bd53046cc569","Type":"ContainerStarted","Data":"544e5a0fdfee1f47e1351f2ebc42ba2238a6a737eb85f190d6ca1f17ee7184a1"} Feb 27 17:17:20 crc kubenswrapper[4751]: I0227 17:17:20.781595 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xxs98" podStartSLOduration=3.326838977 podStartE2EDuration="5.781574517s" podCreationTimestamp="2026-02-27 17:17:15 +0000 UTC" firstStartedPulling="2026-02-27 17:17:17.699014713 +0000 UTC m=+3199.846029190" lastFinishedPulling="2026-02-27 17:17:20.153750283 +0000 UTC m=+3202.300764730" observedRunningTime="2026-02-27 17:17:20.775610373 +0000 UTC m=+3202.922624830" watchObservedRunningTime="2026-02-27 
17:17:20.781574517 +0000 UTC m=+3202.928588994" Feb 27 17:17:26 crc kubenswrapper[4751]: I0227 17:17:26.148305 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xxs98" Feb 27 17:17:26 crc kubenswrapper[4751]: I0227 17:17:26.148378 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xxs98" Feb 27 17:17:26 crc kubenswrapper[4751]: I0227 17:17:26.221567 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xxs98" Feb 27 17:17:26 crc kubenswrapper[4751]: I0227 17:17:26.860170 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xxs98" Feb 27 17:17:26 crc kubenswrapper[4751]: I0227 17:17:26.929102 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xxs98"] Feb 27 17:17:28 crc kubenswrapper[4751]: I0227 17:17:28.807012 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xxs98" podUID="aba79b48-6788-4bab-a286-bd53046cc569" containerName="registry-server" containerID="cri-o://544e5a0fdfee1f47e1351f2ebc42ba2238a6a737eb85f190d6ca1f17ee7184a1" gracePeriod=2 Feb 27 17:17:28 crc kubenswrapper[4751]: I0227 17:17:28.922857 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:17:28 crc kubenswrapper[4751]: I0227 17:17:28.922990 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:17:28 crc kubenswrapper[4751]: I0227 17:17:28.923173 4751 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 17:17:28 crc kubenswrapper[4751]: I0227 17:17:28.924682 4751 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49"} pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 27 17:17:28 crc kubenswrapper[4751]: I0227 17:17:28.924787 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" containerID="cri-o://24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" gracePeriod=600 Feb 27 17:17:29 crc kubenswrapper[4751]: E0227 17:17:29.061700 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.363327 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xxs98" Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.485191 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrrr2\" (UniqueName: \"kubernetes.io/projected/aba79b48-6788-4bab-a286-bd53046cc569-kube-api-access-lrrr2\") pod \"aba79b48-6788-4bab-a286-bd53046cc569\" (UID: \"aba79b48-6788-4bab-a286-bd53046cc569\") " Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.485619 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aba79b48-6788-4bab-a286-bd53046cc569-catalog-content\") pod \"aba79b48-6788-4bab-a286-bd53046cc569\" (UID: \"aba79b48-6788-4bab-a286-bd53046cc569\") " Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.485866 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aba79b48-6788-4bab-a286-bd53046cc569-utilities\") pod \"aba79b48-6788-4bab-a286-bd53046cc569\" (UID: \"aba79b48-6788-4bab-a286-bd53046cc569\") " Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.488090 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aba79b48-6788-4bab-a286-bd53046cc569-utilities" (OuterVolumeSpecName: "utilities") pod "aba79b48-6788-4bab-a286-bd53046cc569" (UID: "aba79b48-6788-4bab-a286-bd53046cc569"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.495367 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aba79b48-6788-4bab-a286-bd53046cc569-kube-api-access-lrrr2" (OuterVolumeSpecName: "kube-api-access-lrrr2") pod "aba79b48-6788-4bab-a286-bd53046cc569" (UID: "aba79b48-6788-4bab-a286-bd53046cc569"). InnerVolumeSpecName "kube-api-access-lrrr2". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.575307 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aba79b48-6788-4bab-a286-bd53046cc569-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "aba79b48-6788-4bab-a286-bd53046cc569" (UID: "aba79b48-6788-4bab-a286-bd53046cc569"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.588041 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrrr2\" (UniqueName: \"kubernetes.io/projected/aba79b48-6788-4bab-a286-bd53046cc569-kube-api-access-lrrr2\") on node \"crc\" DevicePath \"\"" Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.588103 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aba79b48-6788-4bab-a286-bd53046cc569-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.588130 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aba79b48-6788-4bab-a286-bd53046cc569-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.819742 4751 generic.go:334] "Generic (PLEG): container finished" podID="aba79b48-6788-4bab-a286-bd53046cc569" containerID="544e5a0fdfee1f47e1351f2ebc42ba2238a6a737eb85f190d6ca1f17ee7184a1" exitCode=0 Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.819818 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xxs98" event={"ID":"aba79b48-6788-4bab-a286-bd53046cc569","Type":"ContainerDied","Data":"544e5a0fdfee1f47e1351f2ebc42ba2238a6a737eb85f190d6ca1f17ee7184a1"} Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.819918 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xxs98" Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.819957 4751 scope.go:117] "RemoveContainer" containerID="544e5a0fdfee1f47e1351f2ebc42ba2238a6a737eb85f190d6ca1f17ee7184a1" Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.819928 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xxs98" event={"ID":"aba79b48-6788-4bab-a286-bd53046cc569","Type":"ContainerDied","Data":"2b12686bf84fadd90e924dd3fa249d41fe59971a6ad58103ce3e112b029aadab"} Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.823199 4751 generic.go:334] "Generic (PLEG): container finished" podID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" exitCode=0 Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.823225 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerDied","Data":"24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49"} Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.823740 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:17:29 crc kubenswrapper[4751]: E0227 17:17:29.823998 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.851429 4751 scope.go:117] "RemoveContainer" 
containerID="76fe7ca17b4aa3be991eda75677f48fcfbfdfa02955b851e64219afcaaea828e" Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.867558 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xxs98"] Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.882627 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xxs98"] Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.885605 4751 scope.go:117] "RemoveContainer" containerID="17ee1a656052cbb2e528212958a04b9adc0a50ed6f7f3426368cc367b407c5d8" Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.901893 4751 scope.go:117] "RemoveContainer" containerID="544e5a0fdfee1f47e1351f2ebc42ba2238a6a737eb85f190d6ca1f17ee7184a1" Feb 27 17:17:29 crc kubenswrapper[4751]: E0227 17:17:29.902192 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"544e5a0fdfee1f47e1351f2ebc42ba2238a6a737eb85f190d6ca1f17ee7184a1\": container with ID starting with 544e5a0fdfee1f47e1351f2ebc42ba2238a6a737eb85f190d6ca1f17ee7184a1 not found: ID does not exist" containerID="544e5a0fdfee1f47e1351f2ebc42ba2238a6a737eb85f190d6ca1f17ee7184a1" Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.902218 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"544e5a0fdfee1f47e1351f2ebc42ba2238a6a737eb85f190d6ca1f17ee7184a1"} err="failed to get container status \"544e5a0fdfee1f47e1351f2ebc42ba2238a6a737eb85f190d6ca1f17ee7184a1\": rpc error: code = NotFound desc = could not find container \"544e5a0fdfee1f47e1351f2ebc42ba2238a6a737eb85f190d6ca1f17ee7184a1\": container with ID starting with 544e5a0fdfee1f47e1351f2ebc42ba2238a6a737eb85f190d6ca1f17ee7184a1 not found: ID does not exist" Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.902259 4751 scope.go:117] "RemoveContainer" containerID="76fe7ca17b4aa3be991eda75677f48fcfbfdfa02955b851e64219afcaaea828e" Feb 27 17:17:29 crc kubenswrapper[4751]: E0227 17:17:29.902478 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76fe7ca17b4aa3be991eda75677f48fcfbfdfa02955b851e64219afcaaea828e\": container with ID starting with 76fe7ca17b4aa3be991eda75677f48fcfbfdfa02955b851e64219afcaaea828e not found: ID does not exist" containerID="76fe7ca17b4aa3be991eda75677f48fcfbfdfa02955b851e64219afcaaea828e" Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.902493 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76fe7ca17b4aa3be991eda75677f48fcfbfdfa02955b851e64219afcaaea828e"} err="failed to get container status \"76fe7ca17b4aa3be991eda75677f48fcfbfdfa02955b851e64219afcaaea828e\": rpc error: code = NotFound desc = could not find container \"76fe7ca17b4aa3be991eda75677f48fcfbfdfa02955b851e64219afcaaea828e\": container with ID starting with 76fe7ca17b4aa3be991eda75677f48fcfbfdfa02955b851e64219afcaaea828e not found: ID does not exist" Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.902505 4751 scope.go:117] "RemoveContainer" containerID="17ee1a656052cbb2e528212958a04b9adc0a50ed6f7f3426368cc367b407c5d8" Feb 27 17:17:29 crc kubenswrapper[4751]: E0227 17:17:29.902733 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"17ee1a656052cbb2e528212958a04b9adc0a50ed6f7f3426368cc367b407c5d8\": container with ID starting with 
17ee1a656052cbb2e528212958a04b9adc0a50ed6f7f3426368cc367b407c5d8 not found: ID does not exist" containerID="17ee1a656052cbb2e528212958a04b9adc0a50ed6f7f3426368cc367b407c5d8" Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.902750 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17ee1a656052cbb2e528212958a04b9adc0a50ed6f7f3426368cc367b407c5d8"} err="failed to get container status \"17ee1a656052cbb2e528212958a04b9adc0a50ed6f7f3426368cc367b407c5d8\": rpc error: code = NotFound desc = could not find container \"17ee1a656052cbb2e528212958a04b9adc0a50ed6f7f3426368cc367b407c5d8\": container with ID starting with 17ee1a656052cbb2e528212958a04b9adc0a50ed6f7f3426368cc367b407c5d8 not found: ID does not exist" Feb 27 17:17:29 crc kubenswrapper[4751]: I0227 17:17:29.902761 4751 scope.go:117] "RemoveContainer" containerID="bd8999e1ab4da3e8cda07e501f94f9a857c58ee7c6ff481b7560440bab9e40c0" Feb 27 17:17:30 crc kubenswrapper[4751]: I0227 17:17:30.538703 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aba79b48-6788-4bab-a286-bd53046cc569" path="/var/lib/kubelet/pods/aba79b48-6788-4bab-a286-bd53046cc569/volumes" Feb 27 17:17:40 crc kubenswrapper[4751]: I0227 17:17:40.521337 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:17:40 crc kubenswrapper[4751]: E0227 17:17:40.522345 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:17:54 crc kubenswrapper[4751]: I0227 17:17:54.521063 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:17:54 crc kubenswrapper[4751]: E0227 17:17:54.522252 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:18:00 crc kubenswrapper[4751]: I0227 17:18:00.181951 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536878-qh5qk"] Feb 27 17:18:00 crc kubenswrapper[4751]: E0227 17:18:00.183065 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aba79b48-6788-4bab-a286-bd53046cc569" containerName="registry-server" Feb 27 17:18:00 crc kubenswrapper[4751]: I0227 17:18:00.183091 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="aba79b48-6788-4bab-a286-bd53046cc569" containerName="registry-server" Feb 27 17:18:00 crc kubenswrapper[4751]: E0227 17:18:00.183119 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aba79b48-6788-4bab-a286-bd53046cc569" containerName="extract-content" Feb 27 17:18:00 crc kubenswrapper[4751]: I0227 17:18:00.183133 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="aba79b48-6788-4bab-a286-bd53046cc569" containerName="extract-content" Feb 27 17:18:00 crc kubenswrapper[4751]: E0227 
17:18:00.183154 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aba79b48-6788-4bab-a286-bd53046cc569" containerName="extract-utilities" Feb 27 17:18:00 crc kubenswrapper[4751]: I0227 17:18:00.183167 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="aba79b48-6788-4bab-a286-bd53046cc569" containerName="extract-utilities" Feb 27 17:18:00 crc kubenswrapper[4751]: I0227 17:18:00.183515 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="aba79b48-6788-4bab-a286-bd53046cc569" containerName="registry-server" Feb 27 17:18:00 crc kubenswrapper[4751]: I0227 17:18:00.184301 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536878-qh5qk" Feb 27 17:18:00 crc kubenswrapper[4751]: I0227 17:18:00.193950 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:18:00 crc kubenswrapper[4751]: I0227 17:18:00.194753 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:18:00 crc kubenswrapper[4751]: I0227 17:18:00.195148 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:18:00 crc kubenswrapper[4751]: I0227 17:18:00.196436 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536878-qh5qk"] Feb 27 17:18:00 crc kubenswrapper[4751]: I0227 17:18:00.339696 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jkl5k\" (UniqueName: \"kubernetes.io/projected/b7e1f43f-f252-405d-a031-7773d9219a08-kube-api-access-jkl5k\") pod \"auto-csr-approver-29536878-qh5qk\" (UID: \"b7e1f43f-f252-405d-a031-7773d9219a08\") " pod="openshift-infra/auto-csr-approver-29536878-qh5qk" Feb 27 17:18:00 crc kubenswrapper[4751]: I0227 17:18:00.440958 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jkl5k\" (UniqueName: \"kubernetes.io/projected/b7e1f43f-f252-405d-a031-7773d9219a08-kube-api-access-jkl5k\") pod \"auto-csr-approver-29536878-qh5qk\" (UID: \"b7e1f43f-f252-405d-a031-7773d9219a08\") " pod="openshift-infra/auto-csr-approver-29536878-qh5qk" Feb 27 17:18:00 crc kubenswrapper[4751]: I0227 17:18:00.468683 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jkl5k\" (UniqueName: \"kubernetes.io/projected/b7e1f43f-f252-405d-a031-7773d9219a08-kube-api-access-jkl5k\") pod \"auto-csr-approver-29536878-qh5qk\" (UID: \"b7e1f43f-f252-405d-a031-7773d9219a08\") " pod="openshift-infra/auto-csr-approver-29536878-qh5qk" Feb 27 17:18:00 crc kubenswrapper[4751]: I0227 17:18:00.526594 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536878-qh5qk" Feb 27 17:18:01 crc kubenswrapper[4751]: I0227 17:18:01.025314 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536878-qh5qk"] Feb 27 17:18:01 crc kubenswrapper[4751]: W0227 17:18:01.034098 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb7e1f43f_f252_405d_a031_7773d9219a08.slice/crio-998cd96bcdbf1abee9404be77014469b5e1c76a428f49bb86471ec2b3ebea615 WatchSource:0}: Error finding container 998cd96bcdbf1abee9404be77014469b5e1c76a428f49bb86471ec2b3ebea615: Status 404 returned error can't find the container with id 998cd96bcdbf1abee9404be77014469b5e1c76a428f49bb86471ec2b3ebea615 Feb 27 17:18:01 crc kubenswrapper[4751]: I0227 17:18:01.320176 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536878-qh5qk" event={"ID":"b7e1f43f-f252-405d-a031-7773d9219a08","Type":"ContainerStarted","Data":"998cd96bcdbf1abee9404be77014469b5e1c76a428f49bb86471ec2b3ebea615"} Feb 27 17:18:03 crc kubenswrapper[4751]: I0227 17:18:03.342013 4751 generic.go:334] "Generic (PLEG): container finished" podID="b7e1f43f-f252-405d-a031-7773d9219a08" containerID="d48fab6b560e7073221a3874fecf9e89a07c51db5abe3889409c3cf839ef2e98" exitCode=0 Feb 27 17:18:03 crc kubenswrapper[4751]: I0227 17:18:03.342140 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536878-qh5qk" event={"ID":"b7e1f43f-f252-405d-a031-7773d9219a08","Type":"ContainerDied","Data":"d48fab6b560e7073221a3874fecf9e89a07c51db5abe3889409c3cf839ef2e98"} Feb 27 17:18:04 crc kubenswrapper[4751]: I0227 17:18:04.697798 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536878-qh5qk" Feb 27 17:18:04 crc kubenswrapper[4751]: I0227 17:18:04.747536 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkl5k\" (UniqueName: \"kubernetes.io/projected/b7e1f43f-f252-405d-a031-7773d9219a08-kube-api-access-jkl5k\") pod \"b7e1f43f-f252-405d-a031-7773d9219a08\" (UID: \"b7e1f43f-f252-405d-a031-7773d9219a08\") " Feb 27 17:18:04 crc kubenswrapper[4751]: I0227 17:18:04.760154 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7e1f43f-f252-405d-a031-7773d9219a08-kube-api-access-jkl5k" (OuterVolumeSpecName: "kube-api-access-jkl5k") pod "b7e1f43f-f252-405d-a031-7773d9219a08" (UID: "b7e1f43f-f252-405d-a031-7773d9219a08"). InnerVolumeSpecName "kube-api-access-jkl5k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:18:04 crc kubenswrapper[4751]: I0227 17:18:04.850392 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkl5k\" (UniqueName: \"kubernetes.io/projected/b7e1f43f-f252-405d-a031-7773d9219a08-kube-api-access-jkl5k\") on node \"crc\" DevicePath \"\"" Feb 27 17:18:05 crc kubenswrapper[4751]: I0227 17:18:05.363968 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536878-qh5qk" event={"ID":"b7e1f43f-f252-405d-a031-7773d9219a08","Type":"ContainerDied","Data":"998cd96bcdbf1abee9404be77014469b5e1c76a428f49bb86471ec2b3ebea615"} Feb 27 17:18:05 crc kubenswrapper[4751]: I0227 17:18:05.364067 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="998cd96bcdbf1abee9404be77014469b5e1c76a428f49bb86471ec2b3ebea615" Feb 27 17:18:05 crc kubenswrapper[4751]: I0227 17:18:05.364123 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536878-qh5qk" Feb 27 17:18:05 crc kubenswrapper[4751]: I0227 17:18:05.832290 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536872-5dslp"] Feb 27 17:18:05 crc kubenswrapper[4751]: I0227 17:18:05.838878 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536872-5dslp"] Feb 27 17:18:06 crc kubenswrapper[4751]: I0227 17:18:06.532791 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bbd1eb54-185b-4132-b7ea-f6ce2688c71c" path="/var/lib/kubelet/pods/bbd1eb54-185b-4132-b7ea-f6ce2688c71c/volumes" Feb 27 17:18:09 crc kubenswrapper[4751]: I0227 17:18:09.521234 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:18:09 crc kubenswrapper[4751]: E0227 17:18:09.521937 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:18:20 crc kubenswrapper[4751]: I0227 17:18:20.520943 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:18:20 crc kubenswrapper[4751]: E0227 17:18:20.522238 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:18:35 crc kubenswrapper[4751]: I0227 17:18:35.520950 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:18:35 crc kubenswrapper[4751]: E0227 17:18:35.522134 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:18:46 crc kubenswrapper[4751]: I0227 17:18:46.521345 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:18:46 crc kubenswrapper[4751]: E0227 17:18:46.522303 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:18:49 crc kubenswrapper[4751]: I0227 17:18:49.970789 4751 scope.go:117] "RemoveContainer" containerID="7d910b7111ec218ef847e3fdc11cd9f1242b1257246181d340d7bc37558f1a34" Feb 27 17:19:00 crc kubenswrapper[4751]: I0227 17:19:00.522146 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:19:00 crc kubenswrapper[4751]: E0227 17:19:00.522817 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:19:11 crc kubenswrapper[4751]: I0227 17:19:11.520274 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:19:11 crc kubenswrapper[4751]: E0227 17:19:11.520842 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:19:25 crc kubenswrapper[4751]: I0227 17:19:25.499661 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5rkbj"] Feb 27 17:19:25 crc kubenswrapper[4751]: E0227 17:19:25.500353 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7e1f43f-f252-405d-a031-7773d9219a08" containerName="oc" Feb 27 17:19:25 crc kubenswrapper[4751]: I0227 17:19:25.500365 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7e1f43f-f252-405d-a031-7773d9219a08" containerName="oc" Feb 27 17:19:25 crc kubenswrapper[4751]: I0227 17:19:25.500529 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7e1f43f-f252-405d-a031-7773d9219a08" containerName="oc" Feb 27 17:19:25 crc kubenswrapper[4751]: I0227 17:19:25.501449 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5rkbj" Feb 27 17:19:25 crc kubenswrapper[4751]: I0227 17:19:25.519220 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5rkbj"] Feb 27 17:19:25 crc kubenswrapper[4751]: I0227 17:19:25.520863 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:19:25 crc kubenswrapper[4751]: E0227 17:19:25.521062 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:19:25 crc kubenswrapper[4751]: I0227 17:19:25.601249 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72cd0ab1-6c59-4ce2-b691-04a43a5816c1-catalog-content\") pod \"redhat-operators-5rkbj\" (UID: \"72cd0ab1-6c59-4ce2-b691-04a43a5816c1\") " pod="openshift-marketplace/redhat-operators-5rkbj" Feb 27 17:19:25 crc kubenswrapper[4751]: I0227 17:19:25.601332 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72cd0ab1-6c59-4ce2-b691-04a43a5816c1-utilities\") pod \"redhat-operators-5rkbj\" (UID: \"72cd0ab1-6c59-4ce2-b691-04a43a5816c1\") " pod="openshift-marketplace/redhat-operators-5rkbj" Feb 27 17:19:25 crc kubenswrapper[4751]: I0227 17:19:25.601581 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2qfw\" (UniqueName: \"kubernetes.io/projected/72cd0ab1-6c59-4ce2-b691-04a43a5816c1-kube-api-access-k2qfw\") pod \"redhat-operators-5rkbj\" (UID: \"72cd0ab1-6c59-4ce2-b691-04a43a5816c1\") " pod="openshift-marketplace/redhat-operators-5rkbj" Feb 27 17:19:25 crc kubenswrapper[4751]: I0227 17:19:25.702596 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72cd0ab1-6c59-4ce2-b691-04a43a5816c1-catalog-content\") pod \"redhat-operators-5rkbj\" (UID: \"72cd0ab1-6c59-4ce2-b691-04a43a5816c1\") " pod="openshift-marketplace/redhat-operators-5rkbj" Feb 27 17:19:25 crc kubenswrapper[4751]: I0227 17:19:25.702681 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72cd0ab1-6c59-4ce2-b691-04a43a5816c1-utilities\") pod \"redhat-operators-5rkbj\" (UID: \"72cd0ab1-6c59-4ce2-b691-04a43a5816c1\") " pod="openshift-marketplace/redhat-operators-5rkbj" Feb 27 17:19:25 crc kubenswrapper[4751]: I0227 17:19:25.702779 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2qfw\" (UniqueName: \"kubernetes.io/projected/72cd0ab1-6c59-4ce2-b691-04a43a5816c1-kube-api-access-k2qfw\") pod \"redhat-operators-5rkbj\" (UID: \"72cd0ab1-6c59-4ce2-b691-04a43a5816c1\") " pod="openshift-marketplace/redhat-operators-5rkbj" Feb 27 17:19:25 crc kubenswrapper[4751]: I0227 17:19:25.703135 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/72cd0ab1-6c59-4ce2-b691-04a43a5816c1-catalog-content\") pod \"redhat-operators-5rkbj\" (UID: \"72cd0ab1-6c59-4ce2-b691-04a43a5816c1\") " pod="openshift-marketplace/redhat-operators-5rkbj" Feb 27 17:19:25 crc kubenswrapper[4751]: I0227 17:19:25.703300 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72cd0ab1-6c59-4ce2-b691-04a43a5816c1-utilities\") pod \"redhat-operators-5rkbj\" (UID: \"72cd0ab1-6c59-4ce2-b691-04a43a5816c1\") " pod="openshift-marketplace/redhat-operators-5rkbj" Feb 27 17:19:25 crc kubenswrapper[4751]: I0227 17:19:25.729713 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2qfw\" (UniqueName: \"kubernetes.io/projected/72cd0ab1-6c59-4ce2-b691-04a43a5816c1-kube-api-access-k2qfw\") pod \"redhat-operators-5rkbj\" (UID: \"72cd0ab1-6c59-4ce2-b691-04a43a5816c1\") " pod="openshift-marketplace/redhat-operators-5rkbj" Feb 27 17:19:25 crc kubenswrapper[4751]: I0227 17:19:25.823352 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5rkbj" Feb 27 17:19:26 crc kubenswrapper[4751]: I0227 17:19:26.306998 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5rkbj"] Feb 27 17:19:27 crc kubenswrapper[4751]: I0227 17:19:27.083345 4751 generic.go:334] "Generic (PLEG): container finished" podID="72cd0ab1-6c59-4ce2-b691-04a43a5816c1" containerID="4647841248b4e8190d7e33544814b5398f1f42f220257d4a87a5fd928df2e915" exitCode=0 Feb 27 17:19:27 crc kubenswrapper[4751]: I0227 17:19:27.083434 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5rkbj" event={"ID":"72cd0ab1-6c59-4ce2-b691-04a43a5816c1","Type":"ContainerDied","Data":"4647841248b4e8190d7e33544814b5398f1f42f220257d4a87a5fd928df2e915"} Feb 27 17:19:27 crc kubenswrapper[4751]: I0227 17:19:27.083753 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5rkbj" event={"ID":"72cd0ab1-6c59-4ce2-b691-04a43a5816c1","Type":"ContainerStarted","Data":"9b53d03b69c9bb966bc6fedd66926c1a05b026ed9443e2094fabdeded1af4369"} Feb 27 17:19:28 crc kubenswrapper[4751]: I0227 17:19:28.105708 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5rkbj" event={"ID":"72cd0ab1-6c59-4ce2-b691-04a43a5816c1","Type":"ContainerStarted","Data":"70ddc2c90eb48f5373a190e5d51ee4e03620a07e6fa13d3a3a4e3a5e508c6fc6"} Feb 27 17:19:29 crc kubenswrapper[4751]: I0227 17:19:29.113076 4751 generic.go:334] "Generic (PLEG): container finished" podID="72cd0ab1-6c59-4ce2-b691-04a43a5816c1" containerID="70ddc2c90eb48f5373a190e5d51ee4e03620a07e6fa13d3a3a4e3a5e508c6fc6" exitCode=0 Feb 27 17:19:29 crc kubenswrapper[4751]: I0227 17:19:29.113119 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5rkbj" event={"ID":"72cd0ab1-6c59-4ce2-b691-04a43a5816c1","Type":"ContainerDied","Data":"70ddc2c90eb48f5373a190e5d51ee4e03620a07e6fa13d3a3a4e3a5e508c6fc6"} Feb 27 17:19:30 crc kubenswrapper[4751]: I0227 17:19:30.124387 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5rkbj" event={"ID":"72cd0ab1-6c59-4ce2-b691-04a43a5816c1","Type":"ContainerStarted","Data":"bd83ae9cb0029a08e50dec5bef1377aef734ec97364314d134d05dee9d819582"} Feb 27 17:19:30 crc kubenswrapper[4751]: I0227 17:19:30.157650 4751 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5rkbj" podStartSLOduration=2.616127048 podStartE2EDuration="5.157636388s" podCreationTimestamp="2026-02-27 17:19:25 +0000 UTC" firstStartedPulling="2026-02-27 17:19:27.085379231 +0000 UTC m=+3329.232393678" lastFinishedPulling="2026-02-27 17:19:29.626888561 +0000 UTC m=+3331.773903018" observedRunningTime="2026-02-27 17:19:30.150611537 +0000 UTC m=+3332.297626034" watchObservedRunningTime="2026-02-27 17:19:30.157636388 +0000 UTC m=+3332.304650835" Feb 27 17:19:35 crc kubenswrapper[4751]: I0227 17:19:35.824283 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5rkbj" Feb 27 17:19:35 crc kubenswrapper[4751]: I0227 17:19:35.825676 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5rkbj" Feb 27 17:19:36 crc kubenswrapper[4751]: I0227 17:19:36.903142 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5rkbj" podUID="72cd0ab1-6c59-4ce2-b691-04a43a5816c1" containerName="registry-server" probeResult="failure" output=< Feb 27 17:19:36 crc kubenswrapper[4751]: timeout: failed to connect service ":50051" within 1s Feb 27 17:19:36 crc kubenswrapper[4751]: > Feb 27 17:19:40 crc kubenswrapper[4751]: I0227 17:19:40.522272 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:19:40 crc kubenswrapper[4751]: E0227 17:19:40.522736 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:19:45 crc kubenswrapper[4751]: I0227 17:19:45.888255 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5rkbj" Feb 27 17:19:45 crc kubenswrapper[4751]: I0227 17:19:45.948720 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5rkbj" Feb 27 17:19:46 crc kubenswrapper[4751]: I0227 17:19:46.154610 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5rkbj"] Feb 27 17:19:47 crc kubenswrapper[4751]: I0227 17:19:47.259394 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5rkbj" podUID="72cd0ab1-6c59-4ce2-b691-04a43a5816c1" containerName="registry-server" containerID="cri-o://bd83ae9cb0029a08e50dec5bef1377aef734ec97364314d134d05dee9d819582" gracePeriod=2 Feb 27 17:19:47 crc kubenswrapper[4751]: I0227 17:19:47.674997 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5rkbj" Feb 27 17:19:47 crc kubenswrapper[4751]: I0227 17:19:47.770780 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72cd0ab1-6c59-4ce2-b691-04a43a5816c1-utilities\") pod \"72cd0ab1-6c59-4ce2-b691-04a43a5816c1\" (UID: \"72cd0ab1-6c59-4ce2-b691-04a43a5816c1\") " Feb 27 17:19:47 crc kubenswrapper[4751]: I0227 17:19:47.770933 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k2qfw\" (UniqueName: \"kubernetes.io/projected/72cd0ab1-6c59-4ce2-b691-04a43a5816c1-kube-api-access-k2qfw\") pod \"72cd0ab1-6c59-4ce2-b691-04a43a5816c1\" (UID: \"72cd0ab1-6c59-4ce2-b691-04a43a5816c1\") " Feb 27 17:19:47 crc kubenswrapper[4751]: I0227 17:19:47.771052 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72cd0ab1-6c59-4ce2-b691-04a43a5816c1-catalog-content\") pod \"72cd0ab1-6c59-4ce2-b691-04a43a5816c1\" (UID: \"72cd0ab1-6c59-4ce2-b691-04a43a5816c1\") " Feb 27 17:19:47 crc kubenswrapper[4751]: I0227 17:19:47.772460 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/72cd0ab1-6c59-4ce2-b691-04a43a5816c1-utilities" (OuterVolumeSpecName: "utilities") pod "72cd0ab1-6c59-4ce2-b691-04a43a5816c1" (UID: "72cd0ab1-6c59-4ce2-b691-04a43a5816c1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:19:47 crc kubenswrapper[4751]: I0227 17:19:47.778777 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72cd0ab1-6c59-4ce2-b691-04a43a5816c1-kube-api-access-k2qfw" (OuterVolumeSpecName: "kube-api-access-k2qfw") pod "72cd0ab1-6c59-4ce2-b691-04a43a5816c1" (UID: "72cd0ab1-6c59-4ce2-b691-04a43a5816c1"). InnerVolumeSpecName "kube-api-access-k2qfw". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:19:47 crc kubenswrapper[4751]: I0227 17:19:47.872989 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k2qfw\" (UniqueName: \"kubernetes.io/projected/72cd0ab1-6c59-4ce2-b691-04a43a5816c1-kube-api-access-k2qfw\") on node \"crc\" DevicePath \"\"" Feb 27 17:19:47 crc kubenswrapper[4751]: I0227 17:19:47.873048 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72cd0ab1-6c59-4ce2-b691-04a43a5816c1-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 17:19:47 crc kubenswrapper[4751]: I0227 17:19:47.953744 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/72cd0ab1-6c59-4ce2-b691-04a43a5816c1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "72cd0ab1-6c59-4ce2-b691-04a43a5816c1" (UID: "72cd0ab1-6c59-4ce2-b691-04a43a5816c1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:19:47 crc kubenswrapper[4751]: I0227 17:19:47.974909 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72cd0ab1-6c59-4ce2-b691-04a43a5816c1-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 17:19:48 crc kubenswrapper[4751]: I0227 17:19:48.268307 4751 generic.go:334] "Generic (PLEG): container finished" podID="72cd0ab1-6c59-4ce2-b691-04a43a5816c1" containerID="bd83ae9cb0029a08e50dec5bef1377aef734ec97364314d134d05dee9d819582" exitCode=0 Feb 27 17:19:48 crc kubenswrapper[4751]: I0227 17:19:48.268346 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5rkbj" event={"ID":"72cd0ab1-6c59-4ce2-b691-04a43a5816c1","Type":"ContainerDied","Data":"bd83ae9cb0029a08e50dec5bef1377aef734ec97364314d134d05dee9d819582"} Feb 27 17:19:48 crc kubenswrapper[4751]: I0227 17:19:48.268371 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5rkbj" event={"ID":"72cd0ab1-6c59-4ce2-b691-04a43a5816c1","Type":"ContainerDied","Data":"9b53d03b69c9bb966bc6fedd66926c1a05b026ed9443e2094fabdeded1af4369"} Feb 27 17:19:48 crc kubenswrapper[4751]: I0227 17:19:48.268386 4751 scope.go:117] "RemoveContainer" containerID="bd83ae9cb0029a08e50dec5bef1377aef734ec97364314d134d05dee9d819582" Feb 27 17:19:48 crc kubenswrapper[4751]: I0227 17:19:48.268498 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5rkbj" Feb 27 17:19:48 crc kubenswrapper[4751]: I0227 17:19:48.313805 4751 scope.go:117] "RemoveContainer" containerID="70ddc2c90eb48f5373a190e5d51ee4e03620a07e6fa13d3a3a4e3a5e508c6fc6" Feb 27 17:19:48 crc kubenswrapper[4751]: I0227 17:19:48.316966 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5rkbj"] Feb 27 17:19:48 crc kubenswrapper[4751]: I0227 17:19:48.335215 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5rkbj"] Feb 27 17:19:48 crc kubenswrapper[4751]: I0227 17:19:48.346374 4751 scope.go:117] "RemoveContainer" containerID="4647841248b4e8190d7e33544814b5398f1f42f220257d4a87a5fd928df2e915" Feb 27 17:19:48 crc kubenswrapper[4751]: I0227 17:19:48.362507 4751 scope.go:117] "RemoveContainer" containerID="bd83ae9cb0029a08e50dec5bef1377aef734ec97364314d134d05dee9d819582" Feb 27 17:19:48 crc kubenswrapper[4751]: E0227 17:19:48.362894 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd83ae9cb0029a08e50dec5bef1377aef734ec97364314d134d05dee9d819582\": container with ID starting with bd83ae9cb0029a08e50dec5bef1377aef734ec97364314d134d05dee9d819582 not found: ID does not exist" containerID="bd83ae9cb0029a08e50dec5bef1377aef734ec97364314d134d05dee9d819582" Feb 27 17:19:48 crc kubenswrapper[4751]: I0227 17:19:48.362935 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd83ae9cb0029a08e50dec5bef1377aef734ec97364314d134d05dee9d819582"} err="failed to get container status \"bd83ae9cb0029a08e50dec5bef1377aef734ec97364314d134d05dee9d819582\": rpc error: code = NotFound desc = could not find container \"bd83ae9cb0029a08e50dec5bef1377aef734ec97364314d134d05dee9d819582\": container with ID starting with bd83ae9cb0029a08e50dec5bef1377aef734ec97364314d134d05dee9d819582 not found: ID does not exist" Feb 27 17:19:48 crc 
kubenswrapper[4751]: I0227 17:19:48.362963 4751 scope.go:117] "RemoveContainer" containerID="70ddc2c90eb48f5373a190e5d51ee4e03620a07e6fa13d3a3a4e3a5e508c6fc6" Feb 27 17:19:48 crc kubenswrapper[4751]: E0227 17:19:48.363154 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70ddc2c90eb48f5373a190e5d51ee4e03620a07e6fa13d3a3a4e3a5e508c6fc6\": container with ID starting with 70ddc2c90eb48f5373a190e5d51ee4e03620a07e6fa13d3a3a4e3a5e508c6fc6 not found: ID does not exist" containerID="70ddc2c90eb48f5373a190e5d51ee4e03620a07e6fa13d3a3a4e3a5e508c6fc6" Feb 27 17:19:48 crc kubenswrapper[4751]: I0227 17:19:48.363185 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70ddc2c90eb48f5373a190e5d51ee4e03620a07e6fa13d3a3a4e3a5e508c6fc6"} err="failed to get container status \"70ddc2c90eb48f5373a190e5d51ee4e03620a07e6fa13d3a3a4e3a5e508c6fc6\": rpc error: code = NotFound desc = could not find container \"70ddc2c90eb48f5373a190e5d51ee4e03620a07e6fa13d3a3a4e3a5e508c6fc6\": container with ID starting with 70ddc2c90eb48f5373a190e5d51ee4e03620a07e6fa13d3a3a4e3a5e508c6fc6 not found: ID does not exist" Feb 27 17:19:48 crc kubenswrapper[4751]: I0227 17:19:48.363203 4751 scope.go:117] "RemoveContainer" containerID="4647841248b4e8190d7e33544814b5398f1f42f220257d4a87a5fd928df2e915" Feb 27 17:19:48 crc kubenswrapper[4751]: E0227 17:19:48.363543 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4647841248b4e8190d7e33544814b5398f1f42f220257d4a87a5fd928df2e915\": container with ID starting with 4647841248b4e8190d7e33544814b5398f1f42f220257d4a87a5fd928df2e915 not found: ID does not exist" containerID="4647841248b4e8190d7e33544814b5398f1f42f220257d4a87a5fd928df2e915" Feb 27 17:19:48 crc kubenswrapper[4751]: I0227 17:19:48.363571 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4647841248b4e8190d7e33544814b5398f1f42f220257d4a87a5fd928df2e915"} err="failed to get container status \"4647841248b4e8190d7e33544814b5398f1f42f220257d4a87a5fd928df2e915\": rpc error: code = NotFound desc = could not find container \"4647841248b4e8190d7e33544814b5398f1f42f220257d4a87a5fd928df2e915\": container with ID starting with 4647841248b4e8190d7e33544814b5398f1f42f220257d4a87a5fd928df2e915 not found: ID does not exist" Feb 27 17:19:48 crc kubenswrapper[4751]: I0227 17:19:48.533284 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72cd0ab1-6c59-4ce2-b691-04a43a5816c1" path="/var/lib/kubelet/pods/72cd0ab1-6c59-4ce2-b691-04a43a5816c1/volumes" Feb 27 17:19:52 crc kubenswrapper[4751]: I0227 17:19:52.520603 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:19:52 crc kubenswrapper[4751]: E0227 17:19:52.521371 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:20:00 crc kubenswrapper[4751]: I0227 17:20:00.156327 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536880-wbbfp"] Feb 27 
17:20:00 crc kubenswrapper[4751]: E0227 17:20:00.157213 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72cd0ab1-6c59-4ce2-b691-04a43a5816c1" containerName="extract-content" Feb 27 17:20:00 crc kubenswrapper[4751]: I0227 17:20:00.157238 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="72cd0ab1-6c59-4ce2-b691-04a43a5816c1" containerName="extract-content" Feb 27 17:20:00 crc kubenswrapper[4751]: E0227 17:20:00.157270 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72cd0ab1-6c59-4ce2-b691-04a43a5816c1" containerName="extract-utilities" Feb 27 17:20:00 crc kubenswrapper[4751]: I0227 17:20:00.157288 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="72cd0ab1-6c59-4ce2-b691-04a43a5816c1" containerName="extract-utilities" Feb 27 17:20:00 crc kubenswrapper[4751]: E0227 17:20:00.157335 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72cd0ab1-6c59-4ce2-b691-04a43a5816c1" containerName="registry-server" Feb 27 17:20:00 crc kubenswrapper[4751]: I0227 17:20:00.157349 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="72cd0ab1-6c59-4ce2-b691-04a43a5816c1" containerName="registry-server" Feb 27 17:20:00 crc kubenswrapper[4751]: I0227 17:20:00.157645 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="72cd0ab1-6c59-4ce2-b691-04a43a5816c1" containerName="registry-server" Feb 27 17:20:00 crc kubenswrapper[4751]: I0227 17:20:00.162011 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536880-wbbfp" Feb 27 17:20:00 crc kubenswrapper[4751]: I0227 17:20:00.164933 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:20:00 crc kubenswrapper[4751]: I0227 17:20:00.165860 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:20:00 crc kubenswrapper[4751]: I0227 17:20:00.166553 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:20:00 crc kubenswrapper[4751]: I0227 17:20:00.176020 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536880-wbbfp"] Feb 27 17:20:00 crc kubenswrapper[4751]: I0227 17:20:00.266954 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfbtm\" (UniqueName: \"kubernetes.io/projected/c9ac5da7-d730-4f01-b049-b6f39dc0e4dd-kube-api-access-mfbtm\") pod \"auto-csr-approver-29536880-wbbfp\" (UID: \"c9ac5da7-d730-4f01-b049-b6f39dc0e4dd\") " pod="openshift-infra/auto-csr-approver-29536880-wbbfp" Feb 27 17:20:00 crc kubenswrapper[4751]: I0227 17:20:00.367841 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfbtm\" (UniqueName: \"kubernetes.io/projected/c9ac5da7-d730-4f01-b049-b6f39dc0e4dd-kube-api-access-mfbtm\") pod \"auto-csr-approver-29536880-wbbfp\" (UID: \"c9ac5da7-d730-4f01-b049-b6f39dc0e4dd\") " pod="openshift-infra/auto-csr-approver-29536880-wbbfp" Feb 27 17:20:00 crc kubenswrapper[4751]: I0227 17:20:00.393803 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfbtm\" (UniqueName: \"kubernetes.io/projected/c9ac5da7-d730-4f01-b049-b6f39dc0e4dd-kube-api-access-mfbtm\") pod \"auto-csr-approver-29536880-wbbfp\" (UID: \"c9ac5da7-d730-4f01-b049-b6f39dc0e4dd\") " pod="openshift-infra/auto-csr-approver-29536880-wbbfp" Feb 
27 17:20:00 crc kubenswrapper[4751]: I0227 17:20:00.495219 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536880-wbbfp" Feb 27 17:20:00 crc kubenswrapper[4751]: I0227 17:20:00.972347 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536880-wbbfp"] Feb 27 17:20:01 crc kubenswrapper[4751]: I0227 17:20:01.395303 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536880-wbbfp" event={"ID":"c9ac5da7-d730-4f01-b049-b6f39dc0e4dd","Type":"ContainerStarted","Data":"0f03aa444728a07e8c13ab8ddeb37f7572674eb526bb0b40fd3b1c5925ad84e6"} Feb 27 17:20:03 crc kubenswrapper[4751]: I0227 17:20:03.409493 4751 generic.go:334] "Generic (PLEG): container finished" podID="c9ac5da7-d730-4f01-b049-b6f39dc0e4dd" containerID="d4423ca0ab41048b753f1f0af77f75958e6f82f575729d6b2cb784e4b4145685" exitCode=0 Feb 27 17:20:03 crc kubenswrapper[4751]: I0227 17:20:03.409694 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536880-wbbfp" event={"ID":"c9ac5da7-d730-4f01-b049-b6f39dc0e4dd","Type":"ContainerDied","Data":"d4423ca0ab41048b753f1f0af77f75958e6f82f575729d6b2cb784e4b4145685"} Feb 27 17:20:04 crc kubenswrapper[4751]: I0227 17:20:04.851609 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536880-wbbfp" Feb 27 17:20:04 crc kubenswrapper[4751]: I0227 17:20:04.958748 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mfbtm\" (UniqueName: \"kubernetes.io/projected/c9ac5da7-d730-4f01-b049-b6f39dc0e4dd-kube-api-access-mfbtm\") pod \"c9ac5da7-d730-4f01-b049-b6f39dc0e4dd\" (UID: \"c9ac5da7-d730-4f01-b049-b6f39dc0e4dd\") " Feb 27 17:20:04 crc kubenswrapper[4751]: I0227 17:20:04.969952 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9ac5da7-d730-4f01-b049-b6f39dc0e4dd-kube-api-access-mfbtm" (OuterVolumeSpecName: "kube-api-access-mfbtm") pod "c9ac5da7-d730-4f01-b049-b6f39dc0e4dd" (UID: "c9ac5da7-d730-4f01-b049-b6f39dc0e4dd"). InnerVolumeSpecName "kube-api-access-mfbtm". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:20:05 crc kubenswrapper[4751]: I0227 17:20:05.060818 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mfbtm\" (UniqueName: \"kubernetes.io/projected/c9ac5da7-d730-4f01-b049-b6f39dc0e4dd-kube-api-access-mfbtm\") on node \"crc\" DevicePath \"\"" Feb 27 17:20:05 crc kubenswrapper[4751]: I0227 17:20:05.467793 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536880-wbbfp" event={"ID":"c9ac5da7-d730-4f01-b049-b6f39dc0e4dd","Type":"ContainerDied","Data":"0f03aa444728a07e8c13ab8ddeb37f7572674eb526bb0b40fd3b1c5925ad84e6"} Feb 27 17:20:05 crc kubenswrapper[4751]: I0227 17:20:05.467855 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0f03aa444728a07e8c13ab8ddeb37f7572674eb526bb0b40fd3b1c5925ad84e6" Feb 27 17:20:05 crc kubenswrapper[4751]: I0227 17:20:05.467873 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536880-wbbfp" Feb 27 17:20:05 crc kubenswrapper[4751]: I0227 17:20:05.521430 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:20:05 crc kubenswrapper[4751]: E0227 17:20:05.521904 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:20:05 crc kubenswrapper[4751]: I0227 17:20:05.943928 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536874-w4nmg"] Feb 27 17:20:05 crc kubenswrapper[4751]: I0227 17:20:05.950798 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536874-w4nmg"] Feb 27 17:20:06 crc kubenswrapper[4751]: I0227 17:20:06.539170 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62c6c6bd-82b4-4b5a-88d7-ebcbf999f3b5" path="/var/lib/kubelet/pods/62c6c6bd-82b4-4b5a-88d7-ebcbf999f3b5/volumes" Feb 27 17:20:08 crc kubenswrapper[4751]: I0227 17:20:08.709963 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-p22qp"] Feb 27 17:20:08 crc kubenswrapper[4751]: E0227 17:20:08.710707 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9ac5da7-d730-4f01-b049-b6f39dc0e4dd" containerName="oc" Feb 27 17:20:08 crc kubenswrapper[4751]: I0227 17:20:08.710741 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9ac5da7-d730-4f01-b049-b6f39dc0e4dd" containerName="oc" Feb 27 17:20:08 crc kubenswrapper[4751]: I0227 17:20:08.711120 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9ac5da7-d730-4f01-b049-b6f39dc0e4dd" containerName="oc" Feb 27 17:20:08 crc kubenswrapper[4751]: I0227 17:20:08.713643 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-p22qp" Feb 27 17:20:08 crc kubenswrapper[4751]: I0227 17:20:08.723996 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-p22qp"] Feb 27 17:20:08 crc kubenswrapper[4751]: I0227 17:20:08.824458 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d537dc1a-e8f2-4288-8791-dc83e923cd75-catalog-content\") pod \"certified-operators-p22qp\" (UID: \"d537dc1a-e8f2-4288-8791-dc83e923cd75\") " pod="openshift-marketplace/certified-operators-p22qp" Feb 27 17:20:08 crc kubenswrapper[4751]: I0227 17:20:08.824612 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d537dc1a-e8f2-4288-8791-dc83e923cd75-utilities\") pod \"certified-operators-p22qp\" (UID: \"d537dc1a-e8f2-4288-8791-dc83e923cd75\") " pod="openshift-marketplace/certified-operators-p22qp" Feb 27 17:20:08 crc kubenswrapper[4751]: I0227 17:20:08.824682 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mt8cl\" (UniqueName: \"kubernetes.io/projected/d537dc1a-e8f2-4288-8791-dc83e923cd75-kube-api-access-mt8cl\") pod \"certified-operators-p22qp\" (UID: \"d537dc1a-e8f2-4288-8791-dc83e923cd75\") " pod="openshift-marketplace/certified-operators-p22qp" Feb 27 17:20:08 crc kubenswrapper[4751]: I0227 17:20:08.928053 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d537dc1a-e8f2-4288-8791-dc83e923cd75-utilities\") pod \"certified-operators-p22qp\" (UID: \"d537dc1a-e8f2-4288-8791-dc83e923cd75\") " pod="openshift-marketplace/certified-operators-p22qp" Feb 27 17:20:08 crc kubenswrapper[4751]: I0227 17:20:08.928129 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mt8cl\" (UniqueName: \"kubernetes.io/projected/d537dc1a-e8f2-4288-8791-dc83e923cd75-kube-api-access-mt8cl\") pod \"certified-operators-p22qp\" (UID: \"d537dc1a-e8f2-4288-8791-dc83e923cd75\") " pod="openshift-marketplace/certified-operators-p22qp" Feb 27 17:20:08 crc kubenswrapper[4751]: I0227 17:20:08.928164 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d537dc1a-e8f2-4288-8791-dc83e923cd75-catalog-content\") pod \"certified-operators-p22qp\" (UID: \"d537dc1a-e8f2-4288-8791-dc83e923cd75\") " pod="openshift-marketplace/certified-operators-p22qp" Feb 27 17:20:08 crc kubenswrapper[4751]: I0227 17:20:08.928790 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d537dc1a-e8f2-4288-8791-dc83e923cd75-catalog-content\") pod \"certified-operators-p22qp\" (UID: \"d537dc1a-e8f2-4288-8791-dc83e923cd75\") " pod="openshift-marketplace/certified-operators-p22qp" Feb 27 17:20:08 crc kubenswrapper[4751]: I0227 17:20:08.928791 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d537dc1a-e8f2-4288-8791-dc83e923cd75-utilities\") pod \"certified-operators-p22qp\" (UID: \"d537dc1a-e8f2-4288-8791-dc83e923cd75\") " pod="openshift-marketplace/certified-operators-p22qp" Feb 27 17:20:08 crc kubenswrapper[4751]: I0227 17:20:08.955435 4751 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-mt8cl\" (UniqueName: \"kubernetes.io/projected/d537dc1a-e8f2-4288-8791-dc83e923cd75-kube-api-access-mt8cl\") pod \"certified-operators-p22qp\" (UID: \"d537dc1a-e8f2-4288-8791-dc83e923cd75\") " pod="openshift-marketplace/certified-operators-p22qp" Feb 27 17:20:09 crc kubenswrapper[4751]: I0227 17:20:09.051877 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p22qp" Feb 27 17:20:09 crc kubenswrapper[4751]: I0227 17:20:09.359988 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-p22qp"] Feb 27 17:20:09 crc kubenswrapper[4751]: I0227 17:20:09.502178 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p22qp" event={"ID":"d537dc1a-e8f2-4288-8791-dc83e923cd75","Type":"ContainerStarted","Data":"49c0920da42ac61237d6b13eb6e84b3a6333f864bbf944a27f145354c3b6b079"} Feb 27 17:20:10 crc kubenswrapper[4751]: I0227 17:20:10.513099 4751 generic.go:334] "Generic (PLEG): container finished" podID="d537dc1a-e8f2-4288-8791-dc83e923cd75" containerID="f97fe5d67c8ffbfe0c7dd7b93586ed3771e1d6174aa6db88f78caeed6acb0f5a" exitCode=0 Feb 27 17:20:10 crc kubenswrapper[4751]: I0227 17:20:10.513149 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p22qp" event={"ID":"d537dc1a-e8f2-4288-8791-dc83e923cd75","Type":"ContainerDied","Data":"f97fe5d67c8ffbfe0c7dd7b93586ed3771e1d6174aa6db88f78caeed6acb0f5a"} Feb 27 17:20:16 crc kubenswrapper[4751]: I0227 17:20:16.582659 4751 generic.go:334] "Generic (PLEG): container finished" podID="d537dc1a-e8f2-4288-8791-dc83e923cd75" containerID="99ec597c26c2bc5738e6b3345d34c26b8d7ff57804f6c7fb8de0744d29d89e59" exitCode=0 Feb 27 17:20:16 crc kubenswrapper[4751]: I0227 17:20:16.582775 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p22qp" event={"ID":"d537dc1a-e8f2-4288-8791-dc83e923cd75","Type":"ContainerDied","Data":"99ec597c26c2bc5738e6b3345d34c26b8d7ff57804f6c7fb8de0744d29d89e59"} Feb 27 17:20:17 crc kubenswrapper[4751]: I0227 17:20:17.596677 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p22qp" event={"ID":"d537dc1a-e8f2-4288-8791-dc83e923cd75","Type":"ContainerStarted","Data":"67e2de6545c938deb33b50b0514e4a3dae86e1c3c05c4461b01bdaa4f05d4cca"} Feb 27 17:20:17 crc kubenswrapper[4751]: I0227 17:20:17.635144 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-p22qp" podStartSLOduration=3.156795915 podStartE2EDuration="9.635115185s" podCreationTimestamp="2026-02-27 17:20:08 +0000 UTC" firstStartedPulling="2026-02-27 17:20:10.515589943 +0000 UTC m=+3372.662604390" lastFinishedPulling="2026-02-27 17:20:16.993909173 +0000 UTC m=+3379.140923660" observedRunningTime="2026-02-27 17:20:17.624614787 +0000 UTC m=+3379.771629274" watchObservedRunningTime="2026-02-27 17:20:17.635115185 +0000 UTC m=+3379.782129672" Feb 27 17:20:18 crc kubenswrapper[4751]: I0227 17:20:18.529787 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:20:18 crc kubenswrapper[4751]: E0227 17:20:18.530333 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:20:19 crc kubenswrapper[4751]: I0227 17:20:19.052863 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-p22qp" Feb 27 17:20:19 crc kubenswrapper[4751]: I0227 17:20:19.053146 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-p22qp" Feb 27 17:20:20 crc kubenswrapper[4751]: I0227 17:20:20.122164 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-p22qp" podUID="d537dc1a-e8f2-4288-8791-dc83e923cd75" containerName="registry-server" probeResult="failure" output=< Feb 27 17:20:20 crc kubenswrapper[4751]: timeout: failed to connect service ":50051" within 1s Feb 27 17:20:20 crc kubenswrapper[4751]: > Feb 27 17:20:29 crc kubenswrapper[4751]: I0227 17:20:29.126440 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-p22qp" Feb 27 17:20:29 crc kubenswrapper[4751]: I0227 17:20:29.203732 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-p22qp" Feb 27 17:20:29 crc kubenswrapper[4751]: I0227 17:20:29.521166 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:20:29 crc kubenswrapper[4751]: E0227 17:20:29.521561 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:20:29 crc kubenswrapper[4751]: I0227 17:20:29.644531 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-p22qp"] Feb 27 17:20:29 crc kubenswrapper[4751]: I0227 17:20:29.792949 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-f7hzl"] Feb 27 17:20:29 crc kubenswrapper[4751]: I0227 17:20:29.793173 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-f7hzl" podUID="203be014-29c5-44e2-bc8d-1a71c1448f57" containerName="registry-server" containerID="cri-o://173d8ba8a355a979fab9b924d0aef58c33a5ccd52fb9a82b7e8a0dc20dab07d5" gracePeriod=2 Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.262853 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-f7hzl" Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.357724 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cgl76\" (UniqueName: \"kubernetes.io/projected/203be014-29c5-44e2-bc8d-1a71c1448f57-kube-api-access-cgl76\") pod \"203be014-29c5-44e2-bc8d-1a71c1448f57\" (UID: \"203be014-29c5-44e2-bc8d-1a71c1448f57\") " Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.357847 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/203be014-29c5-44e2-bc8d-1a71c1448f57-utilities\") pod \"203be014-29c5-44e2-bc8d-1a71c1448f57\" (UID: \"203be014-29c5-44e2-bc8d-1a71c1448f57\") " Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.357882 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/203be014-29c5-44e2-bc8d-1a71c1448f57-catalog-content\") pod \"203be014-29c5-44e2-bc8d-1a71c1448f57\" (UID: \"203be014-29c5-44e2-bc8d-1a71c1448f57\") " Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.358666 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/203be014-29c5-44e2-bc8d-1a71c1448f57-utilities" (OuterVolumeSpecName: "utilities") pod "203be014-29c5-44e2-bc8d-1a71c1448f57" (UID: "203be014-29c5-44e2-bc8d-1a71c1448f57"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.362908 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/203be014-29c5-44e2-bc8d-1a71c1448f57-kube-api-access-cgl76" (OuterVolumeSpecName: "kube-api-access-cgl76") pod "203be014-29c5-44e2-bc8d-1a71c1448f57" (UID: "203be014-29c5-44e2-bc8d-1a71c1448f57"). InnerVolumeSpecName "kube-api-access-cgl76". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.402769 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/203be014-29c5-44e2-bc8d-1a71c1448f57-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "203be014-29c5-44e2-bc8d-1a71c1448f57" (UID: "203be014-29c5-44e2-bc8d-1a71c1448f57"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.458889 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cgl76\" (UniqueName: \"kubernetes.io/projected/203be014-29c5-44e2-bc8d-1a71c1448f57-kube-api-access-cgl76\") on node \"crc\" DevicePath \"\"" Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.458922 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/203be014-29c5-44e2-bc8d-1a71c1448f57-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.458934 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/203be014-29c5-44e2-bc8d-1a71c1448f57-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.727772 4751 generic.go:334] "Generic (PLEG): container finished" podID="203be014-29c5-44e2-bc8d-1a71c1448f57" containerID="173d8ba8a355a979fab9b924d0aef58c33a5ccd52fb9a82b7e8a0dc20dab07d5" exitCode=0 Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.728532 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f7hzl" Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.728902 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f7hzl" event={"ID":"203be014-29c5-44e2-bc8d-1a71c1448f57","Type":"ContainerDied","Data":"173d8ba8a355a979fab9b924d0aef58c33a5ccd52fb9a82b7e8a0dc20dab07d5"} Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.728929 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f7hzl" event={"ID":"203be014-29c5-44e2-bc8d-1a71c1448f57","Type":"ContainerDied","Data":"7c2721e5b344978aa00387bbea09520d923f7264d7ccf701f6afe65dfb03e12c"} Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.728945 4751 scope.go:117] "RemoveContainer" containerID="173d8ba8a355a979fab9b924d0aef58c33a5ccd52fb9a82b7e8a0dc20dab07d5" Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.749952 4751 scope.go:117] "RemoveContainer" containerID="dea35405915979b67414244fc47f43f0db8e554c2474e46bea346dbb139d4fae" Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.751110 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-f7hzl"] Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.757251 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-f7hzl"] Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.773074 4751 scope.go:117] "RemoveContainer" containerID="559a84c20d51f1f3f6f8e2b60a2caa886af63917133ec15fbfac9e45df923848" Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.802680 4751 scope.go:117] "RemoveContainer" containerID="173d8ba8a355a979fab9b924d0aef58c33a5ccd52fb9a82b7e8a0dc20dab07d5" Feb 27 17:20:30 crc kubenswrapper[4751]: E0227 17:20:30.803228 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"173d8ba8a355a979fab9b924d0aef58c33a5ccd52fb9a82b7e8a0dc20dab07d5\": container with ID starting with 173d8ba8a355a979fab9b924d0aef58c33a5ccd52fb9a82b7e8a0dc20dab07d5 not found: ID does not exist" containerID="173d8ba8a355a979fab9b924d0aef58c33a5ccd52fb9a82b7e8a0dc20dab07d5" Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.803279 
4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"173d8ba8a355a979fab9b924d0aef58c33a5ccd52fb9a82b7e8a0dc20dab07d5"} err="failed to get container status \"173d8ba8a355a979fab9b924d0aef58c33a5ccd52fb9a82b7e8a0dc20dab07d5\": rpc error: code = NotFound desc = could not find container \"173d8ba8a355a979fab9b924d0aef58c33a5ccd52fb9a82b7e8a0dc20dab07d5\": container with ID starting with 173d8ba8a355a979fab9b924d0aef58c33a5ccd52fb9a82b7e8a0dc20dab07d5 not found: ID does not exist" Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.803304 4751 scope.go:117] "RemoveContainer" containerID="dea35405915979b67414244fc47f43f0db8e554c2474e46bea346dbb139d4fae" Feb 27 17:20:30 crc kubenswrapper[4751]: E0227 17:20:30.803727 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dea35405915979b67414244fc47f43f0db8e554c2474e46bea346dbb139d4fae\": container with ID starting with dea35405915979b67414244fc47f43f0db8e554c2474e46bea346dbb139d4fae not found: ID does not exist" containerID="dea35405915979b67414244fc47f43f0db8e554c2474e46bea346dbb139d4fae" Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.803750 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dea35405915979b67414244fc47f43f0db8e554c2474e46bea346dbb139d4fae"} err="failed to get container status \"dea35405915979b67414244fc47f43f0db8e554c2474e46bea346dbb139d4fae\": rpc error: code = NotFound desc = could not find container \"dea35405915979b67414244fc47f43f0db8e554c2474e46bea346dbb139d4fae\": container with ID starting with dea35405915979b67414244fc47f43f0db8e554c2474e46bea346dbb139d4fae not found: ID does not exist" Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.803764 4751 scope.go:117] "RemoveContainer" containerID="559a84c20d51f1f3f6f8e2b60a2caa886af63917133ec15fbfac9e45df923848" Feb 27 17:20:30 crc kubenswrapper[4751]: E0227 17:20:30.804162 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"559a84c20d51f1f3f6f8e2b60a2caa886af63917133ec15fbfac9e45df923848\": container with ID starting with 559a84c20d51f1f3f6f8e2b60a2caa886af63917133ec15fbfac9e45df923848 not found: ID does not exist" containerID="559a84c20d51f1f3f6f8e2b60a2caa886af63917133ec15fbfac9e45df923848" Feb 27 17:20:30 crc kubenswrapper[4751]: I0227 17:20:30.804189 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"559a84c20d51f1f3f6f8e2b60a2caa886af63917133ec15fbfac9e45df923848"} err="failed to get container status \"559a84c20d51f1f3f6f8e2b60a2caa886af63917133ec15fbfac9e45df923848\": rpc error: code = NotFound desc = could not find container \"559a84c20d51f1f3f6f8e2b60a2caa886af63917133ec15fbfac9e45df923848\": container with ID starting with 559a84c20d51f1f3f6f8e2b60a2caa886af63917133ec15fbfac9e45df923848 not found: ID does not exist" Feb 27 17:20:32 crc kubenswrapper[4751]: I0227 17:20:32.529130 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="203be014-29c5-44e2-bc8d-1a71c1448f57" path="/var/lib/kubelet/pods/203be014-29c5-44e2-bc8d-1a71c1448f57/volumes" Feb 27 17:20:40 crc kubenswrapper[4751]: I0227 17:20:40.521197 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:20:40 crc kubenswrapper[4751]: E0227 17:20:40.522329 4751 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:20:50 crc kubenswrapper[4751]: I0227 17:20:50.133584 4751 scope.go:117] "RemoveContainer" containerID="033a86dfe1bf4c9c9edae194f889398cd74163f12d43e5d96a63bf9521b1dcef" Feb 27 17:20:52 crc kubenswrapper[4751]: I0227 17:20:52.520347 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:20:52 crc kubenswrapper[4751]: E0227 17:20:52.520996 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:21:03 crc kubenswrapper[4751]: I0227 17:21:03.521661 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:21:03 crc kubenswrapper[4751]: E0227 17:21:03.522649 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:21:18 crc kubenswrapper[4751]: I0227 17:21:18.527733 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:21:18 crc kubenswrapper[4751]: E0227 17:21:18.528590 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:21:32 crc kubenswrapper[4751]: I0227 17:21:32.521671 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:21:32 crc kubenswrapper[4751]: E0227 17:21:32.522698 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:21:47 crc kubenswrapper[4751]: I0227 17:21:47.520934 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:21:47 crc kubenswrapper[4751]: E0227 17:21:47.522818 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:22:00 crc kubenswrapper[4751]: I0227 17:22:00.153741 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536882-fc7rb"] Feb 27 17:22:00 crc kubenswrapper[4751]: E0227 17:22:00.154688 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="203be014-29c5-44e2-bc8d-1a71c1448f57" containerName="extract-utilities" Feb 27 17:22:00 crc kubenswrapper[4751]: I0227 17:22:00.154706 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="203be014-29c5-44e2-bc8d-1a71c1448f57" containerName="extract-utilities" Feb 27 17:22:00 crc kubenswrapper[4751]: E0227 17:22:00.154756 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="203be014-29c5-44e2-bc8d-1a71c1448f57" containerName="extract-content" Feb 27 17:22:00 crc kubenswrapper[4751]: I0227 17:22:00.154768 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="203be014-29c5-44e2-bc8d-1a71c1448f57" containerName="extract-content" Feb 27 17:22:00 crc kubenswrapper[4751]: E0227 17:22:00.154794 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="203be014-29c5-44e2-bc8d-1a71c1448f57" containerName="registry-server" Feb 27 17:22:00 crc kubenswrapper[4751]: I0227 17:22:00.154805 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="203be014-29c5-44e2-bc8d-1a71c1448f57" containerName="registry-server" Feb 27 17:22:00 crc kubenswrapper[4751]: I0227 17:22:00.155019 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="203be014-29c5-44e2-bc8d-1a71c1448f57" containerName="registry-server" Feb 27 17:22:00 crc kubenswrapper[4751]: I0227 17:22:00.155640 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536882-fc7rb" Feb 27 17:22:00 crc kubenswrapper[4751]: I0227 17:22:00.159279 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:22:00 crc kubenswrapper[4751]: I0227 17:22:00.159330 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:22:00 crc kubenswrapper[4751]: I0227 17:22:00.159295 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:22:00 crc kubenswrapper[4751]: I0227 17:22:00.165265 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536882-fc7rb"] Feb 27 17:22:00 crc kubenswrapper[4751]: I0227 17:22:00.264368 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6snxj\" (UniqueName: \"kubernetes.io/projected/2b29c8ed-b493-44f7-ba88-cb028512a748-kube-api-access-6snxj\") pod \"auto-csr-approver-29536882-fc7rb\" (UID: \"2b29c8ed-b493-44f7-ba88-cb028512a748\") " pod="openshift-infra/auto-csr-approver-29536882-fc7rb" Feb 27 17:22:00 crc kubenswrapper[4751]: I0227 17:22:00.366223 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6snxj\" (UniqueName: \"kubernetes.io/projected/2b29c8ed-b493-44f7-ba88-cb028512a748-kube-api-access-6snxj\") pod \"auto-csr-approver-29536882-fc7rb\" (UID: \"2b29c8ed-b493-44f7-ba88-cb028512a748\") " pod="openshift-infra/auto-csr-approver-29536882-fc7rb" Feb 27 17:22:00 crc kubenswrapper[4751]: I0227 17:22:00.398854 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6snxj\" (UniqueName: \"kubernetes.io/projected/2b29c8ed-b493-44f7-ba88-cb028512a748-kube-api-access-6snxj\") pod \"auto-csr-approver-29536882-fc7rb\" (UID: \"2b29c8ed-b493-44f7-ba88-cb028512a748\") " pod="openshift-infra/auto-csr-approver-29536882-fc7rb" Feb 27 17:22:00 crc kubenswrapper[4751]: I0227 17:22:00.487182 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536882-fc7rb" Feb 27 17:22:00 crc kubenswrapper[4751]: I0227 17:22:00.777451 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536882-fc7rb"] Feb 27 17:22:01 crc kubenswrapper[4751]: I0227 17:22:01.614176 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536882-fc7rb" event={"ID":"2b29c8ed-b493-44f7-ba88-cb028512a748","Type":"ContainerStarted","Data":"69a3e97e1d5aed9b5b05802ad8b107670f0fbebca1752988cc7fcb8f80a11222"} Feb 27 17:22:02 crc kubenswrapper[4751]: I0227 17:22:02.520897 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:22:02 crc kubenswrapper[4751]: E0227 17:22:02.521721 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:22:02 crc kubenswrapper[4751]: I0227 17:22:02.625385 4751 generic.go:334] "Generic (PLEG): container finished" podID="2b29c8ed-b493-44f7-ba88-cb028512a748" containerID="c577296bf7f47a79523d757c0a9b2c35bf37babe05cbd8e5dbec908ee6fca153" exitCode=0 Feb 27 17:22:02 crc kubenswrapper[4751]: I0227 17:22:02.625515 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536882-fc7rb" event={"ID":"2b29c8ed-b493-44f7-ba88-cb028512a748","Type":"ContainerDied","Data":"c577296bf7f47a79523d757c0a9b2c35bf37babe05cbd8e5dbec908ee6fca153"} Feb 27 17:22:04 crc kubenswrapper[4751]: I0227 17:22:04.020300 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536882-fc7rb" Feb 27 17:22:04 crc kubenswrapper[4751]: I0227 17:22:04.128114 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6snxj\" (UniqueName: \"kubernetes.io/projected/2b29c8ed-b493-44f7-ba88-cb028512a748-kube-api-access-6snxj\") pod \"2b29c8ed-b493-44f7-ba88-cb028512a748\" (UID: \"2b29c8ed-b493-44f7-ba88-cb028512a748\") " Feb 27 17:22:04 crc kubenswrapper[4751]: I0227 17:22:04.136572 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b29c8ed-b493-44f7-ba88-cb028512a748-kube-api-access-6snxj" (OuterVolumeSpecName: "kube-api-access-6snxj") pod "2b29c8ed-b493-44f7-ba88-cb028512a748" (UID: "2b29c8ed-b493-44f7-ba88-cb028512a748"). InnerVolumeSpecName "kube-api-access-6snxj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:22:04 crc kubenswrapper[4751]: I0227 17:22:04.229975 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6snxj\" (UniqueName: \"kubernetes.io/projected/2b29c8ed-b493-44f7-ba88-cb028512a748-kube-api-access-6snxj\") on node \"crc\" DevicePath \"\"" Feb 27 17:22:04 crc kubenswrapper[4751]: I0227 17:22:04.648896 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536882-fc7rb" event={"ID":"2b29c8ed-b493-44f7-ba88-cb028512a748","Type":"ContainerDied","Data":"69a3e97e1d5aed9b5b05802ad8b107670f0fbebca1752988cc7fcb8f80a11222"} Feb 27 17:22:04 crc kubenswrapper[4751]: I0227 17:22:04.648949 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="69a3e97e1d5aed9b5b05802ad8b107670f0fbebca1752988cc7fcb8f80a11222" Feb 27 17:22:04 crc kubenswrapper[4751]: I0227 17:22:04.649001 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536882-fc7rb" Feb 27 17:22:05 crc kubenswrapper[4751]: I0227 17:22:05.110592 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536876-5k9lj"] Feb 27 17:22:05 crc kubenswrapper[4751]: I0227 17:22:05.122463 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536876-5k9lj"] Feb 27 17:22:06 crc kubenswrapper[4751]: I0227 17:22:06.536372 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4003ba5b-5827-420e-860b-c76b90a7dbea" path="/var/lib/kubelet/pods/4003ba5b-5827-420e-860b-c76b90a7dbea/volumes" Feb 27 17:22:13 crc kubenswrapper[4751]: I0227 17:22:13.520986 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:22:13 crc kubenswrapper[4751]: E0227 17:22:13.522169 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:22:26 crc kubenswrapper[4751]: I0227 17:22:26.520767 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:22:26 crc kubenswrapper[4751]: E0227 17:22:26.521711 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:22:41 crc kubenswrapper[4751]: I0227 17:22:41.520921 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:22:42 crc kubenswrapper[4751]: I0227 17:22:42.005935 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerStarted","Data":"7940bb00f128870146fc695298c50c06f15b54ea799ca5166d68c2e457868cfa"} Feb 27 17:22:50 
crc kubenswrapper[4751]: I0227 17:22:50.278062 4751 scope.go:117] "RemoveContainer" containerID="addc620d0f9076d94cf5365c143d0d2ded6c639b8237a41a1ff185ad37547562" Feb 27 17:24:00 crc kubenswrapper[4751]: I0227 17:24:00.148587 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536884-4q8r5"] Feb 27 17:24:00 crc kubenswrapper[4751]: E0227 17:24:00.149749 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b29c8ed-b493-44f7-ba88-cb028512a748" containerName="oc" Feb 27 17:24:00 crc kubenswrapper[4751]: I0227 17:24:00.149772 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b29c8ed-b493-44f7-ba88-cb028512a748" containerName="oc" Feb 27 17:24:00 crc kubenswrapper[4751]: I0227 17:24:00.150057 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b29c8ed-b493-44f7-ba88-cb028512a748" containerName="oc" Feb 27 17:24:00 crc kubenswrapper[4751]: I0227 17:24:00.150825 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536884-4q8r5" Feb 27 17:24:00 crc kubenswrapper[4751]: I0227 17:24:00.155201 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:24:00 crc kubenswrapper[4751]: I0227 17:24:00.157554 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:24:00 crc kubenswrapper[4751]: I0227 17:24:00.160587 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:24:00 crc kubenswrapper[4751]: I0227 17:24:00.166992 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536884-4q8r5"] Feb 27 17:24:00 crc kubenswrapper[4751]: I0227 17:24:00.179694 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9jj2\" (UniqueName: \"kubernetes.io/projected/34401835-367b-47f6-b11d-f546ff10e459-kube-api-access-l9jj2\") pod \"auto-csr-approver-29536884-4q8r5\" (UID: \"34401835-367b-47f6-b11d-f546ff10e459\") " pod="openshift-infra/auto-csr-approver-29536884-4q8r5" Feb 27 17:24:00 crc kubenswrapper[4751]: I0227 17:24:00.282353 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9jj2\" (UniqueName: \"kubernetes.io/projected/34401835-367b-47f6-b11d-f546ff10e459-kube-api-access-l9jj2\") pod \"auto-csr-approver-29536884-4q8r5\" (UID: \"34401835-367b-47f6-b11d-f546ff10e459\") " pod="openshift-infra/auto-csr-approver-29536884-4q8r5" Feb 27 17:24:00 crc kubenswrapper[4751]: I0227 17:24:00.313896 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9jj2\" (UniqueName: \"kubernetes.io/projected/34401835-367b-47f6-b11d-f546ff10e459-kube-api-access-l9jj2\") pod \"auto-csr-approver-29536884-4q8r5\" (UID: \"34401835-367b-47f6-b11d-f546ff10e459\") " pod="openshift-infra/auto-csr-approver-29536884-4q8r5" Feb 27 17:24:00 crc kubenswrapper[4751]: I0227 17:24:00.476256 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536884-4q8r5" Feb 27 17:24:00 crc kubenswrapper[4751]: I0227 17:24:00.764622 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536884-4q8r5"] Feb 27 17:24:00 crc kubenswrapper[4751]: I0227 17:24:00.778459 4751 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 27 17:24:01 crc kubenswrapper[4751]: I0227 17:24:01.712031 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536884-4q8r5" event={"ID":"34401835-367b-47f6-b11d-f546ff10e459","Type":"ContainerStarted","Data":"bd6e9ca62cc980a6d45f97b9e4aca9c67373b115c42b779a5f2d3a5f4f528073"} Feb 27 17:24:02 crc kubenswrapper[4751]: I0227 17:24:02.725099 4751 generic.go:334] "Generic (PLEG): container finished" podID="34401835-367b-47f6-b11d-f546ff10e459" containerID="f49b0d75cfe7a9513ce1bd7b42ff4af9efa4e301002e3adbf6d201f9514a31bd" exitCode=0 Feb 27 17:24:02 crc kubenswrapper[4751]: I0227 17:24:02.725327 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536884-4q8r5" event={"ID":"34401835-367b-47f6-b11d-f546ff10e459","Type":"ContainerDied","Data":"f49b0d75cfe7a9513ce1bd7b42ff4af9efa4e301002e3adbf6d201f9514a31bd"} Feb 27 17:24:04 crc kubenswrapper[4751]: I0227 17:24:04.111767 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536884-4q8r5" Feb 27 17:24:04 crc kubenswrapper[4751]: I0227 17:24:04.148515 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l9jj2\" (UniqueName: \"kubernetes.io/projected/34401835-367b-47f6-b11d-f546ff10e459-kube-api-access-l9jj2\") pod \"34401835-367b-47f6-b11d-f546ff10e459\" (UID: \"34401835-367b-47f6-b11d-f546ff10e459\") " Feb 27 17:24:04 crc kubenswrapper[4751]: I0227 17:24:04.169059 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34401835-367b-47f6-b11d-f546ff10e459-kube-api-access-l9jj2" (OuterVolumeSpecName: "kube-api-access-l9jj2") pod "34401835-367b-47f6-b11d-f546ff10e459" (UID: "34401835-367b-47f6-b11d-f546ff10e459"). InnerVolumeSpecName "kube-api-access-l9jj2". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:24:04 crc kubenswrapper[4751]: I0227 17:24:04.250453 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l9jj2\" (UniqueName: \"kubernetes.io/projected/34401835-367b-47f6-b11d-f546ff10e459-kube-api-access-l9jj2\") on node \"crc\" DevicePath \"\"" Feb 27 17:24:04 crc kubenswrapper[4751]: I0227 17:24:04.748881 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536884-4q8r5" event={"ID":"34401835-367b-47f6-b11d-f546ff10e459","Type":"ContainerDied","Data":"bd6e9ca62cc980a6d45f97b9e4aca9c67373b115c42b779a5f2d3a5f4f528073"} Feb 27 17:24:04 crc kubenswrapper[4751]: I0227 17:24:04.748928 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bd6e9ca62cc980a6d45f97b9e4aca9c67373b115c42b779a5f2d3a5f4f528073" Feb 27 17:24:04 crc kubenswrapper[4751]: I0227 17:24:04.748969 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536884-4q8r5" Feb 27 17:24:05 crc kubenswrapper[4751]: I0227 17:24:05.203845 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536878-qh5qk"] Feb 27 17:24:05 crc kubenswrapper[4751]: I0227 17:24:05.211760 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536878-qh5qk"] Feb 27 17:24:06 crc kubenswrapper[4751]: I0227 17:24:06.531346 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7e1f43f-f252-405d-a031-7773d9219a08" path="/var/lib/kubelet/pods/b7e1f43f-f252-405d-a031-7773d9219a08/volumes" Feb 27 17:24:49 crc kubenswrapper[4751]: I0227 17:24:49.282785 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-9mjhd"] Feb 27 17:24:49 crc kubenswrapper[4751]: E0227 17:24:49.283503 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34401835-367b-47f6-b11d-f546ff10e459" containerName="oc" Feb 27 17:24:49 crc kubenswrapper[4751]: I0227 17:24:49.283516 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="34401835-367b-47f6-b11d-f546ff10e459" containerName="oc" Feb 27 17:24:49 crc kubenswrapper[4751]: I0227 17:24:49.283665 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="34401835-367b-47f6-b11d-f546ff10e459" containerName="oc" Feb 27 17:24:49 crc kubenswrapper[4751]: I0227 17:24:49.284511 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9mjhd" Feb 27 17:24:49 crc kubenswrapper[4751]: I0227 17:24:49.296315 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9mjhd"] Feb 27 17:24:49 crc kubenswrapper[4751]: I0227 17:24:49.423684 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8eddc6a-3cbd-4b53-85cf-81056fad1354-catalog-content\") pod \"redhat-marketplace-9mjhd\" (UID: \"d8eddc6a-3cbd-4b53-85cf-81056fad1354\") " pod="openshift-marketplace/redhat-marketplace-9mjhd" Feb 27 17:24:49 crc kubenswrapper[4751]: I0227 17:24:49.423754 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7jwb\" (UniqueName: \"kubernetes.io/projected/d8eddc6a-3cbd-4b53-85cf-81056fad1354-kube-api-access-q7jwb\") pod \"redhat-marketplace-9mjhd\" (UID: \"d8eddc6a-3cbd-4b53-85cf-81056fad1354\") " pod="openshift-marketplace/redhat-marketplace-9mjhd" Feb 27 17:24:49 crc kubenswrapper[4751]: I0227 17:24:49.423784 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8eddc6a-3cbd-4b53-85cf-81056fad1354-utilities\") pod \"redhat-marketplace-9mjhd\" (UID: \"d8eddc6a-3cbd-4b53-85cf-81056fad1354\") " pod="openshift-marketplace/redhat-marketplace-9mjhd" Feb 27 17:24:49 crc kubenswrapper[4751]: I0227 17:24:49.524639 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7jwb\" (UniqueName: \"kubernetes.io/projected/d8eddc6a-3cbd-4b53-85cf-81056fad1354-kube-api-access-q7jwb\") pod \"redhat-marketplace-9mjhd\" (UID: \"d8eddc6a-3cbd-4b53-85cf-81056fad1354\") " pod="openshift-marketplace/redhat-marketplace-9mjhd" Feb 27 17:24:49 crc kubenswrapper[4751]: I0227 17:24:49.524716 4751 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8eddc6a-3cbd-4b53-85cf-81056fad1354-utilities\") pod \"redhat-marketplace-9mjhd\" (UID: \"d8eddc6a-3cbd-4b53-85cf-81056fad1354\") " pod="openshift-marketplace/redhat-marketplace-9mjhd" Feb 27 17:24:49 crc kubenswrapper[4751]: I0227 17:24:49.524881 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8eddc6a-3cbd-4b53-85cf-81056fad1354-catalog-content\") pod \"redhat-marketplace-9mjhd\" (UID: \"d8eddc6a-3cbd-4b53-85cf-81056fad1354\") " pod="openshift-marketplace/redhat-marketplace-9mjhd" Feb 27 17:24:49 crc kubenswrapper[4751]: I0227 17:24:49.527975 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8eddc6a-3cbd-4b53-85cf-81056fad1354-utilities\") pod \"redhat-marketplace-9mjhd\" (UID: \"d8eddc6a-3cbd-4b53-85cf-81056fad1354\") " pod="openshift-marketplace/redhat-marketplace-9mjhd" Feb 27 17:24:49 crc kubenswrapper[4751]: I0227 17:24:49.528046 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8eddc6a-3cbd-4b53-85cf-81056fad1354-catalog-content\") pod \"redhat-marketplace-9mjhd\" (UID: \"d8eddc6a-3cbd-4b53-85cf-81056fad1354\") " pod="openshift-marketplace/redhat-marketplace-9mjhd" Feb 27 17:24:49 crc kubenswrapper[4751]: I0227 17:24:49.543791 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7jwb\" (UniqueName: \"kubernetes.io/projected/d8eddc6a-3cbd-4b53-85cf-81056fad1354-kube-api-access-q7jwb\") pod \"redhat-marketplace-9mjhd\" (UID: \"d8eddc6a-3cbd-4b53-85cf-81056fad1354\") " pod="openshift-marketplace/redhat-marketplace-9mjhd" Feb 27 17:24:49 crc kubenswrapper[4751]: I0227 17:24:49.602925 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9mjhd" Feb 27 17:24:50 crc kubenswrapper[4751]: I0227 17:24:50.136599 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9mjhd"] Feb 27 17:24:50 crc kubenswrapper[4751]: I0227 17:24:50.202427 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9mjhd" event={"ID":"d8eddc6a-3cbd-4b53-85cf-81056fad1354","Type":"ContainerStarted","Data":"9bfdc1a5bdce6c33fdef8144a80eeb3a1a680c0e484c7a53565797d3fd8b81b4"} Feb 27 17:24:50 crc kubenswrapper[4751]: I0227 17:24:50.451073 4751 scope.go:117] "RemoveContainer" containerID="d48fab6b560e7073221a3874fecf9e89a07c51db5abe3889409c3cf839ef2e98" Feb 27 17:24:51 crc kubenswrapper[4751]: I0227 17:24:51.213678 4751 generic.go:334] "Generic (PLEG): container finished" podID="d8eddc6a-3cbd-4b53-85cf-81056fad1354" containerID="fa6b8875e6a1bc0ec06478967be8e1866c347ad855bc60a5b73645b69d51a2e6" exitCode=0 Feb 27 17:24:51 crc kubenswrapper[4751]: I0227 17:24:51.213779 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9mjhd" event={"ID":"d8eddc6a-3cbd-4b53-85cf-81056fad1354","Type":"ContainerDied","Data":"fa6b8875e6a1bc0ec06478967be8e1866c347ad855bc60a5b73645b69d51a2e6"} Feb 27 17:24:52 crc kubenswrapper[4751]: I0227 17:24:52.230325 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9mjhd" event={"ID":"d8eddc6a-3cbd-4b53-85cf-81056fad1354","Type":"ContainerStarted","Data":"8a52f3336053964477d1bf05c557981322b028180ab67e7f11a404dfdee1c8fb"} Feb 27 17:24:53 crc kubenswrapper[4751]: I0227 17:24:53.243379 4751 generic.go:334] "Generic (PLEG): container finished" podID="d8eddc6a-3cbd-4b53-85cf-81056fad1354" containerID="8a52f3336053964477d1bf05c557981322b028180ab67e7f11a404dfdee1c8fb" exitCode=0 Feb 27 17:24:53 crc kubenswrapper[4751]: I0227 17:24:53.243474 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9mjhd" event={"ID":"d8eddc6a-3cbd-4b53-85cf-81056fad1354","Type":"ContainerDied","Data":"8a52f3336053964477d1bf05c557981322b028180ab67e7f11a404dfdee1c8fb"} Feb 27 17:24:54 crc kubenswrapper[4751]: I0227 17:24:54.255287 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9mjhd" event={"ID":"d8eddc6a-3cbd-4b53-85cf-81056fad1354","Type":"ContainerStarted","Data":"fcacef5496eef0996e6f7cc1b790401d6722dbf389535c9ebe5b72df37f02ef4"} Feb 27 17:24:54 crc kubenswrapper[4751]: I0227 17:24:54.284636 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-9mjhd" podStartSLOduration=2.792057393 podStartE2EDuration="5.284603348s" podCreationTimestamp="2026-02-27 17:24:49 +0000 UTC" firstStartedPulling="2026-02-27 17:24:51.215630656 +0000 UTC m=+3653.362645103" lastFinishedPulling="2026-02-27 17:24:53.708176611 +0000 UTC m=+3655.855191058" observedRunningTime="2026-02-27 17:24:54.282962156 +0000 UTC m=+3656.429976653" watchObservedRunningTime="2026-02-27 17:24:54.284603348 +0000 UTC m=+3656.431617835" Feb 27 17:24:58 crc kubenswrapper[4751]: I0227 17:24:58.918671 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:24:58 
crc kubenswrapper[4751]: I0227 17:24:58.919298 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:24:59 crc kubenswrapper[4751]: I0227 17:24:59.603818 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-9mjhd" Feb 27 17:24:59 crc kubenswrapper[4751]: I0227 17:24:59.603888 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-9mjhd" Feb 27 17:24:59 crc kubenswrapper[4751]: I0227 17:24:59.685639 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-9mjhd" Feb 27 17:25:00 crc kubenswrapper[4751]: I0227 17:25:00.374763 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-9mjhd" Feb 27 17:25:00 crc kubenswrapper[4751]: I0227 17:25:00.447866 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9mjhd"] Feb 27 17:25:02 crc kubenswrapper[4751]: I0227 17:25:02.324132 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-9mjhd" podUID="d8eddc6a-3cbd-4b53-85cf-81056fad1354" containerName="registry-server" containerID="cri-o://fcacef5496eef0996e6f7cc1b790401d6722dbf389535c9ebe5b72df37f02ef4" gracePeriod=2 Feb 27 17:25:03 crc kubenswrapper[4751]: I0227 17:25:03.348776 4751 generic.go:334] "Generic (PLEG): container finished" podID="d8eddc6a-3cbd-4b53-85cf-81056fad1354" containerID="fcacef5496eef0996e6f7cc1b790401d6722dbf389535c9ebe5b72df37f02ef4" exitCode=0 Feb 27 17:25:03 crc kubenswrapper[4751]: I0227 17:25:03.349524 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9mjhd" event={"ID":"d8eddc6a-3cbd-4b53-85cf-81056fad1354","Type":"ContainerDied","Data":"fcacef5496eef0996e6f7cc1b790401d6722dbf389535c9ebe5b72df37f02ef4"} Feb 27 17:25:03 crc kubenswrapper[4751]: I0227 17:25:03.349592 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9mjhd" event={"ID":"d8eddc6a-3cbd-4b53-85cf-81056fad1354","Type":"ContainerDied","Data":"9bfdc1a5bdce6c33fdef8144a80eeb3a1a680c0e484c7a53565797d3fd8b81b4"} Feb 27 17:25:03 crc kubenswrapper[4751]: I0227 17:25:03.349633 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9bfdc1a5bdce6c33fdef8144a80eeb3a1a680c0e484c7a53565797d3fd8b81b4" Feb 27 17:25:03 crc kubenswrapper[4751]: I0227 17:25:03.373284 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9mjhd" Feb 27 17:25:03 crc kubenswrapper[4751]: I0227 17:25:03.512732 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8eddc6a-3cbd-4b53-85cf-81056fad1354-catalog-content\") pod \"d8eddc6a-3cbd-4b53-85cf-81056fad1354\" (UID: \"d8eddc6a-3cbd-4b53-85cf-81056fad1354\") " Feb 27 17:25:03 crc kubenswrapper[4751]: I0227 17:25:03.512786 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q7jwb\" (UniqueName: \"kubernetes.io/projected/d8eddc6a-3cbd-4b53-85cf-81056fad1354-kube-api-access-q7jwb\") pod \"d8eddc6a-3cbd-4b53-85cf-81056fad1354\" (UID: \"d8eddc6a-3cbd-4b53-85cf-81056fad1354\") " Feb 27 17:25:03 crc kubenswrapper[4751]: I0227 17:25:03.512875 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8eddc6a-3cbd-4b53-85cf-81056fad1354-utilities\") pod \"d8eddc6a-3cbd-4b53-85cf-81056fad1354\" (UID: \"d8eddc6a-3cbd-4b53-85cf-81056fad1354\") " Feb 27 17:25:03 crc kubenswrapper[4751]: I0227 17:25:03.514174 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8eddc6a-3cbd-4b53-85cf-81056fad1354-utilities" (OuterVolumeSpecName: "utilities") pod "d8eddc6a-3cbd-4b53-85cf-81056fad1354" (UID: "d8eddc6a-3cbd-4b53-85cf-81056fad1354"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:25:03 crc kubenswrapper[4751]: I0227 17:25:03.520142 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8eddc6a-3cbd-4b53-85cf-81056fad1354-kube-api-access-q7jwb" (OuterVolumeSpecName: "kube-api-access-q7jwb") pod "d8eddc6a-3cbd-4b53-85cf-81056fad1354" (UID: "d8eddc6a-3cbd-4b53-85cf-81056fad1354"). InnerVolumeSpecName "kube-api-access-q7jwb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:25:03 crc kubenswrapper[4751]: I0227 17:25:03.553964 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8eddc6a-3cbd-4b53-85cf-81056fad1354-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d8eddc6a-3cbd-4b53-85cf-81056fad1354" (UID: "d8eddc6a-3cbd-4b53-85cf-81056fad1354"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:25:03 crc kubenswrapper[4751]: I0227 17:25:03.616328 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8eddc6a-3cbd-4b53-85cf-81056fad1354-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 17:25:03 crc kubenswrapper[4751]: I0227 17:25:03.616367 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8eddc6a-3cbd-4b53-85cf-81056fad1354-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 17:25:03 crc kubenswrapper[4751]: I0227 17:25:03.616385 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q7jwb\" (UniqueName: \"kubernetes.io/projected/d8eddc6a-3cbd-4b53-85cf-81056fad1354-kube-api-access-q7jwb\") on node \"crc\" DevicePath \"\"" Feb 27 17:25:04 crc kubenswrapper[4751]: I0227 17:25:04.358570 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9mjhd" Feb 27 17:25:04 crc kubenswrapper[4751]: I0227 17:25:04.409353 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9mjhd"] Feb 27 17:25:04 crc kubenswrapper[4751]: I0227 17:25:04.418070 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-9mjhd"] Feb 27 17:25:04 crc kubenswrapper[4751]: I0227 17:25:04.534938 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8eddc6a-3cbd-4b53-85cf-81056fad1354" path="/var/lib/kubelet/pods/d8eddc6a-3cbd-4b53-85cf-81056fad1354/volumes" Feb 27 17:25:28 crc kubenswrapper[4751]: I0227 17:25:28.918207 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:25:28 crc kubenswrapper[4751]: I0227 17:25:28.918841 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:25:58 crc kubenswrapper[4751]: I0227 17:25:58.918711 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:25:58 crc kubenswrapper[4751]: I0227 17:25:58.919177 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:25:58 crc kubenswrapper[4751]: I0227 17:25:58.919218 4751 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 17:25:58 crc kubenswrapper[4751]: I0227 17:25:58.919683 4751 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7940bb00f128870146fc695298c50c06f15b54ea799ca5166d68c2e457868cfa"} pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 27 17:25:58 crc kubenswrapper[4751]: I0227 17:25:58.919762 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" containerID="cri-o://7940bb00f128870146fc695298c50c06f15b54ea799ca5166d68c2e457868cfa" gracePeriod=600 Feb 27 17:25:59 crc kubenswrapper[4751]: I0227 17:25:59.878567 4751 generic.go:334] "Generic (PLEG): container finished" podID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerID="7940bb00f128870146fc695298c50c06f15b54ea799ca5166d68c2e457868cfa" exitCode=0 Feb 27 17:25:59 crc kubenswrapper[4751]: I0227 17:25:59.879214 4751 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerDied","Data":"7940bb00f128870146fc695298c50c06f15b54ea799ca5166d68c2e457868cfa"} Feb 27 17:25:59 crc kubenswrapper[4751]: I0227 17:25:59.879270 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerStarted","Data":"d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f"} Feb 27 17:25:59 crc kubenswrapper[4751]: I0227 17:25:59.879303 4751 scope.go:117] "RemoveContainer" containerID="24909509b162007a331ad1f1ed04527d54e49b744f4f0dadab53f62586493e49" Feb 27 17:26:00 crc kubenswrapper[4751]: I0227 17:26:00.159660 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536886-c5hgd"] Feb 27 17:26:00 crc kubenswrapper[4751]: E0227 17:26:00.160174 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8eddc6a-3cbd-4b53-85cf-81056fad1354" containerName="extract-utilities" Feb 27 17:26:00 crc kubenswrapper[4751]: I0227 17:26:00.160201 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8eddc6a-3cbd-4b53-85cf-81056fad1354" containerName="extract-utilities" Feb 27 17:26:00 crc kubenswrapper[4751]: E0227 17:26:00.160251 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8eddc6a-3cbd-4b53-85cf-81056fad1354" containerName="extract-content" Feb 27 17:26:00 crc kubenswrapper[4751]: I0227 17:26:00.160268 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8eddc6a-3cbd-4b53-85cf-81056fad1354" containerName="extract-content" Feb 27 17:26:00 crc kubenswrapper[4751]: E0227 17:26:00.160299 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8eddc6a-3cbd-4b53-85cf-81056fad1354" containerName="registry-server" Feb 27 17:26:00 crc kubenswrapper[4751]: I0227 17:26:00.160316 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8eddc6a-3cbd-4b53-85cf-81056fad1354" containerName="registry-server" Feb 27 17:26:00 crc kubenswrapper[4751]: I0227 17:26:00.160703 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8eddc6a-3cbd-4b53-85cf-81056fad1354" containerName="registry-server" Feb 27 17:26:00 crc kubenswrapper[4751]: I0227 17:26:00.161628 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536886-c5hgd" Feb 27 17:26:00 crc kubenswrapper[4751]: I0227 17:26:00.171647 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536886-c5hgd"] Feb 27 17:26:00 crc kubenswrapper[4751]: I0227 17:26:00.184997 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:26:00 crc kubenswrapper[4751]: I0227 17:26:00.185748 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:26:00 crc kubenswrapper[4751]: I0227 17:26:00.187395 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:26:00 crc kubenswrapper[4751]: I0227 17:26:00.281161 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58kdc\" (UniqueName: \"kubernetes.io/projected/c6247c33-9a8a-404b-aeb6-01b6296624c2-kube-api-access-58kdc\") pod \"auto-csr-approver-29536886-c5hgd\" (UID: \"c6247c33-9a8a-404b-aeb6-01b6296624c2\") " pod="openshift-infra/auto-csr-approver-29536886-c5hgd" Feb 27 17:26:00 crc kubenswrapper[4751]: I0227 17:26:00.382346 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58kdc\" (UniqueName: \"kubernetes.io/projected/c6247c33-9a8a-404b-aeb6-01b6296624c2-kube-api-access-58kdc\") pod \"auto-csr-approver-29536886-c5hgd\" (UID: \"c6247c33-9a8a-404b-aeb6-01b6296624c2\") " pod="openshift-infra/auto-csr-approver-29536886-c5hgd" Feb 27 17:26:00 crc kubenswrapper[4751]: I0227 17:26:00.416951 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58kdc\" (UniqueName: \"kubernetes.io/projected/c6247c33-9a8a-404b-aeb6-01b6296624c2-kube-api-access-58kdc\") pod \"auto-csr-approver-29536886-c5hgd\" (UID: \"c6247c33-9a8a-404b-aeb6-01b6296624c2\") " pod="openshift-infra/auto-csr-approver-29536886-c5hgd" Feb 27 17:26:00 crc kubenswrapper[4751]: I0227 17:26:00.511307 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536886-c5hgd" Feb 27 17:26:01 crc kubenswrapper[4751]: I0227 17:26:01.004030 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536886-c5hgd"] Feb 27 17:26:01 crc kubenswrapper[4751]: W0227 17:26:01.015132 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc6247c33_9a8a_404b_aeb6_01b6296624c2.slice/crio-62917940c78f27db975629cf6297aa9c2733512b6b084426bb56687b0709cc23 WatchSource:0}: Error finding container 62917940c78f27db975629cf6297aa9c2733512b6b084426bb56687b0709cc23: Status 404 returned error can't find the container with id 62917940c78f27db975629cf6297aa9c2733512b6b084426bb56687b0709cc23 Feb 27 17:26:01 crc kubenswrapper[4751]: I0227 17:26:01.903230 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536886-c5hgd" event={"ID":"c6247c33-9a8a-404b-aeb6-01b6296624c2","Type":"ContainerStarted","Data":"62917940c78f27db975629cf6297aa9c2733512b6b084426bb56687b0709cc23"} Feb 27 17:26:02 crc kubenswrapper[4751]: I0227 17:26:02.918073 4751 generic.go:334] "Generic (PLEG): container finished" podID="c6247c33-9a8a-404b-aeb6-01b6296624c2" containerID="a57abc270ecaccc908ca246339a9fd528bf7682c02d9e86253642cf2c507857a" exitCode=0 Feb 27 17:26:02 crc kubenswrapper[4751]: I0227 17:26:02.918135 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536886-c5hgd" event={"ID":"c6247c33-9a8a-404b-aeb6-01b6296624c2","Type":"ContainerDied","Data":"a57abc270ecaccc908ca246339a9fd528bf7682c02d9e86253642cf2c507857a"} Feb 27 17:26:04 crc kubenswrapper[4751]: I0227 17:26:04.300215 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536886-c5hgd" Feb 27 17:26:04 crc kubenswrapper[4751]: I0227 17:26:04.345969 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58kdc\" (UniqueName: \"kubernetes.io/projected/c6247c33-9a8a-404b-aeb6-01b6296624c2-kube-api-access-58kdc\") pod \"c6247c33-9a8a-404b-aeb6-01b6296624c2\" (UID: \"c6247c33-9a8a-404b-aeb6-01b6296624c2\") " Feb 27 17:26:04 crc kubenswrapper[4751]: I0227 17:26:04.354206 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6247c33-9a8a-404b-aeb6-01b6296624c2-kube-api-access-58kdc" (OuterVolumeSpecName: "kube-api-access-58kdc") pod "c6247c33-9a8a-404b-aeb6-01b6296624c2" (UID: "c6247c33-9a8a-404b-aeb6-01b6296624c2"). InnerVolumeSpecName "kube-api-access-58kdc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:26:04 crc kubenswrapper[4751]: I0227 17:26:04.447761 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-58kdc\" (UniqueName: \"kubernetes.io/projected/c6247c33-9a8a-404b-aeb6-01b6296624c2-kube-api-access-58kdc\") on node \"crc\" DevicePath \"\"" Feb 27 17:26:04 crc kubenswrapper[4751]: I0227 17:26:04.940285 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536886-c5hgd" event={"ID":"c6247c33-9a8a-404b-aeb6-01b6296624c2","Type":"ContainerDied","Data":"62917940c78f27db975629cf6297aa9c2733512b6b084426bb56687b0709cc23"} Feb 27 17:26:04 crc kubenswrapper[4751]: I0227 17:26:04.940374 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="62917940c78f27db975629cf6297aa9c2733512b6b084426bb56687b0709cc23" Feb 27 17:26:04 crc kubenswrapper[4751]: I0227 17:26:04.940511 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536886-c5hgd" Feb 27 17:26:05 crc kubenswrapper[4751]: I0227 17:26:05.393032 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536880-wbbfp"] Feb 27 17:26:05 crc kubenswrapper[4751]: I0227 17:26:05.402886 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536880-wbbfp"] Feb 27 17:26:06 crc kubenswrapper[4751]: I0227 17:26:06.535475 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9ac5da7-d730-4f01-b049-b6f39dc0e4dd" path="/var/lib/kubelet/pods/c9ac5da7-d730-4f01-b049-b6f39dc0e4dd/volumes" Feb 27 17:26:50 crc kubenswrapper[4751]: I0227 17:26:50.538462 4751 scope.go:117] "RemoveContainer" containerID="d4423ca0ab41048b753f1f0af77f75958e6f82f575729d6b2cb784e4b4145685" Feb 27 17:28:00 crc kubenswrapper[4751]: I0227 17:28:00.168524 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536888-8h576"] Feb 27 17:28:00 crc kubenswrapper[4751]: E0227 17:28:00.172787 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6247c33-9a8a-404b-aeb6-01b6296624c2" containerName="oc" Feb 27 17:28:00 crc kubenswrapper[4751]: I0227 17:28:00.172831 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6247c33-9a8a-404b-aeb6-01b6296624c2" containerName="oc" Feb 27 17:28:00 crc kubenswrapper[4751]: I0227 17:28:00.173361 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6247c33-9a8a-404b-aeb6-01b6296624c2" containerName="oc" Feb 27 17:28:00 crc kubenswrapper[4751]: I0227 17:28:00.174556 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536888-8h576" Feb 27 17:28:00 crc kubenswrapper[4751]: I0227 17:28:00.180584 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:28:00 crc kubenswrapper[4751]: I0227 17:28:00.182502 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:28:00 crc kubenswrapper[4751]: I0227 17:28:00.182672 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:28:00 crc kubenswrapper[4751]: I0227 17:28:00.191301 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536888-8h576"] Feb 27 17:28:00 crc kubenswrapper[4751]: I0227 17:28:00.376263 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlwjt\" (UniqueName: \"kubernetes.io/projected/ec005fa2-d8c2-4768-864d-afbe9ff57629-kube-api-access-jlwjt\") pod \"auto-csr-approver-29536888-8h576\" (UID: \"ec005fa2-d8c2-4768-864d-afbe9ff57629\") " pod="openshift-infra/auto-csr-approver-29536888-8h576" Feb 27 17:28:00 crc kubenswrapper[4751]: I0227 17:28:00.478726 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlwjt\" (UniqueName: \"kubernetes.io/projected/ec005fa2-d8c2-4768-864d-afbe9ff57629-kube-api-access-jlwjt\") pod \"auto-csr-approver-29536888-8h576\" (UID: \"ec005fa2-d8c2-4768-864d-afbe9ff57629\") " pod="openshift-infra/auto-csr-approver-29536888-8h576" Feb 27 17:28:00 crc kubenswrapper[4751]: I0227 17:28:00.521094 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlwjt\" (UniqueName: \"kubernetes.io/projected/ec005fa2-d8c2-4768-864d-afbe9ff57629-kube-api-access-jlwjt\") pod \"auto-csr-approver-29536888-8h576\" (UID: \"ec005fa2-d8c2-4768-864d-afbe9ff57629\") " pod="openshift-infra/auto-csr-approver-29536888-8h576" Feb 27 17:28:00 crc kubenswrapper[4751]: I0227 17:28:00.811371 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536888-8h576" Feb 27 17:28:01 crc kubenswrapper[4751]: I0227 17:28:01.132094 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536888-8h576"] Feb 27 17:28:02 crc kubenswrapper[4751]: I0227 17:28:02.010720 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536888-8h576" event={"ID":"ec005fa2-d8c2-4768-864d-afbe9ff57629","Type":"ContainerStarted","Data":"e2f91e4c390f24059591be964f8695f3d0195e890f07db20fa6a3b9acf508b76"} Feb 27 17:28:03 crc kubenswrapper[4751]: I0227 17:28:03.021855 4751 generic.go:334] "Generic (PLEG): container finished" podID="ec005fa2-d8c2-4768-864d-afbe9ff57629" containerID="39f23feeda8f7dc1aae75038c62f8ec70f5ee970a8bb62e009ff8c07a52758c4" exitCode=0 Feb 27 17:28:03 crc kubenswrapper[4751]: I0227 17:28:03.022074 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536888-8h576" event={"ID":"ec005fa2-d8c2-4768-864d-afbe9ff57629","Type":"ContainerDied","Data":"39f23feeda8f7dc1aae75038c62f8ec70f5ee970a8bb62e009ff8c07a52758c4"} Feb 27 17:28:04 crc kubenswrapper[4751]: I0227 17:28:04.408146 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536888-8h576" Feb 27 17:28:04 crc kubenswrapper[4751]: I0227 17:28:04.446383 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jlwjt\" (UniqueName: \"kubernetes.io/projected/ec005fa2-d8c2-4768-864d-afbe9ff57629-kube-api-access-jlwjt\") pod \"ec005fa2-d8c2-4768-864d-afbe9ff57629\" (UID: \"ec005fa2-d8c2-4768-864d-afbe9ff57629\") " Feb 27 17:28:04 crc kubenswrapper[4751]: I0227 17:28:04.456080 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec005fa2-d8c2-4768-864d-afbe9ff57629-kube-api-access-jlwjt" (OuterVolumeSpecName: "kube-api-access-jlwjt") pod "ec005fa2-d8c2-4768-864d-afbe9ff57629" (UID: "ec005fa2-d8c2-4768-864d-afbe9ff57629"). InnerVolumeSpecName "kube-api-access-jlwjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:28:04 crc kubenswrapper[4751]: I0227 17:28:04.548314 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jlwjt\" (UniqueName: \"kubernetes.io/projected/ec005fa2-d8c2-4768-864d-afbe9ff57629-kube-api-access-jlwjt\") on node \"crc\" DevicePath \"\"" Feb 27 17:28:05 crc kubenswrapper[4751]: I0227 17:28:05.046552 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536888-8h576" event={"ID":"ec005fa2-d8c2-4768-864d-afbe9ff57629","Type":"ContainerDied","Data":"e2f91e4c390f24059591be964f8695f3d0195e890f07db20fa6a3b9acf508b76"} Feb 27 17:28:05 crc kubenswrapper[4751]: I0227 17:28:05.046953 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e2f91e4c390f24059591be964f8695f3d0195e890f07db20fa6a3b9acf508b76" Feb 27 17:28:05 crc kubenswrapper[4751]: I0227 17:28:05.046653 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536888-8h576" Feb 27 17:28:05 crc kubenswrapper[4751]: I0227 17:28:05.506569 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536882-fc7rb"] Feb 27 17:28:05 crc kubenswrapper[4751]: I0227 17:28:05.519744 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536882-fc7rb"] Feb 27 17:28:06 crc kubenswrapper[4751]: I0227 17:28:06.535926 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b29c8ed-b493-44f7-ba88-cb028512a748" path="/var/lib/kubelet/pods/2b29c8ed-b493-44f7-ba88-cb028512a748/volumes" Feb 27 17:28:28 crc kubenswrapper[4751]: I0227 17:28:28.918187 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:28:28 crc kubenswrapper[4751]: I0227 17:28:28.919047 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:28:50 crc kubenswrapper[4751]: I0227 17:28:50.636766 4751 scope.go:117] "RemoveContainer" containerID="c577296bf7f47a79523d757c0a9b2c35bf37babe05cbd8e5dbec908ee6fca153" Feb 27 17:28:58 crc kubenswrapper[4751]: I0227 17:28:58.918131 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:28:58 crc kubenswrapper[4751]: I0227 17:28:58.918771 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:29:28 crc kubenswrapper[4751]: I0227 17:29:28.918016 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:29:28 crc kubenswrapper[4751]: I0227 17:29:28.918860 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:29:28 crc kubenswrapper[4751]: I0227 17:29:28.918952 4751 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 17:29:28 crc kubenswrapper[4751]: I0227 17:29:28.920391 4751 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f"} pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 27 17:29:28 crc kubenswrapper[4751]: I0227 17:29:28.920545 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" containerID="cri-o://d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" gracePeriod=600 Feb 27 17:29:29 crc kubenswrapper[4751]: E0227 17:29:29.052281 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:29:29 crc kubenswrapper[4751]: I0227 17:29:29.969503 4751 generic.go:334] "Generic (PLEG): container finished" podID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" exitCode=0 Feb 27 17:29:29 crc kubenswrapper[4751]: I0227 17:29:29.969617 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerDied","Data":"d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f"} Feb 27 17:29:29 crc kubenswrapper[4751]: I0227 17:29:29.970002 4751 scope.go:117] "RemoveContainer" containerID="7940bb00f128870146fc695298c50c06f15b54ea799ca5166d68c2e457868cfa" Feb 27 17:29:29 crc kubenswrapper[4751]: I0227 17:29:29.970989 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:29:29 crc kubenswrapper[4751]: E0227 17:29:29.971640 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:29:30 crc kubenswrapper[4751]: I0227 17:29:30.034011 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pjdzr"] Feb 27 17:29:30 crc kubenswrapper[4751]: E0227 17:29:30.034343 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec005fa2-d8c2-4768-864d-afbe9ff57629" containerName="oc" Feb 27 17:29:30 crc kubenswrapper[4751]: I0227 17:29:30.034364 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec005fa2-d8c2-4768-864d-afbe9ff57629" containerName="oc" Feb 27 17:29:30 crc kubenswrapper[4751]: I0227 17:29:30.034577 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec005fa2-d8c2-4768-864d-afbe9ff57629" containerName="oc" Feb 27 17:29:30 crc kubenswrapper[4751]: I0227 17:29:30.035855 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pjdzr" Feb 27 17:29:30 crc kubenswrapper[4751]: I0227 17:29:30.061608 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pjdzr"] Feb 27 17:29:30 crc kubenswrapper[4751]: I0227 17:29:30.112861 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2lwn4\" (UniqueName: \"kubernetes.io/projected/52183c85-6349-44f1-b92b-f44921299e07-kube-api-access-2lwn4\") pod \"redhat-operators-pjdzr\" (UID: \"52183c85-6349-44f1-b92b-f44921299e07\") " pod="openshift-marketplace/redhat-operators-pjdzr" Feb 27 17:29:30 crc kubenswrapper[4751]: I0227 17:29:30.112928 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52183c85-6349-44f1-b92b-f44921299e07-utilities\") pod \"redhat-operators-pjdzr\" (UID: \"52183c85-6349-44f1-b92b-f44921299e07\") " pod="openshift-marketplace/redhat-operators-pjdzr" Feb 27 17:29:30 crc kubenswrapper[4751]: I0227 17:29:30.113088 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52183c85-6349-44f1-b92b-f44921299e07-catalog-content\") pod \"redhat-operators-pjdzr\" (UID: \"52183c85-6349-44f1-b92b-f44921299e07\") " pod="openshift-marketplace/redhat-operators-pjdzr" Feb 27 17:29:30 crc kubenswrapper[4751]: I0227 17:29:30.214434 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52183c85-6349-44f1-b92b-f44921299e07-catalog-content\") pod \"redhat-operators-pjdzr\" (UID: \"52183c85-6349-44f1-b92b-f44921299e07\") " pod="openshift-marketplace/redhat-operators-pjdzr" Feb 27 17:29:30 crc kubenswrapper[4751]: I0227 17:29:30.214530 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2lwn4\" (UniqueName: \"kubernetes.io/projected/52183c85-6349-44f1-b92b-f44921299e07-kube-api-access-2lwn4\") pod \"redhat-operators-pjdzr\" (UID: \"52183c85-6349-44f1-b92b-f44921299e07\") " pod="openshift-marketplace/redhat-operators-pjdzr" Feb 27 17:29:30 crc kubenswrapper[4751]: I0227 17:29:30.214557 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52183c85-6349-44f1-b92b-f44921299e07-utilities\") pod \"redhat-operators-pjdzr\" (UID: \"52183c85-6349-44f1-b92b-f44921299e07\") " pod="openshift-marketplace/redhat-operators-pjdzr" Feb 27 17:29:30 crc kubenswrapper[4751]: I0227 17:29:30.215235 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52183c85-6349-44f1-b92b-f44921299e07-utilities\") pod \"redhat-operators-pjdzr\" (UID: \"52183c85-6349-44f1-b92b-f44921299e07\") " pod="openshift-marketplace/redhat-operators-pjdzr" Feb 27 17:29:30 crc kubenswrapper[4751]: I0227 17:29:30.215497 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52183c85-6349-44f1-b92b-f44921299e07-catalog-content\") pod \"redhat-operators-pjdzr\" (UID: \"52183c85-6349-44f1-b92b-f44921299e07\") " pod="openshift-marketplace/redhat-operators-pjdzr" Feb 27 17:29:30 crc kubenswrapper[4751]: I0227 17:29:30.246211 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-2lwn4\" (UniqueName: \"kubernetes.io/projected/52183c85-6349-44f1-b92b-f44921299e07-kube-api-access-2lwn4\") pod \"redhat-operators-pjdzr\" (UID: \"52183c85-6349-44f1-b92b-f44921299e07\") " pod="openshift-marketplace/redhat-operators-pjdzr" Feb 27 17:29:30 crc kubenswrapper[4751]: I0227 17:29:30.392680 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pjdzr" Feb 27 17:29:30 crc kubenswrapper[4751]: I0227 17:29:30.828077 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pjdzr"] Feb 27 17:29:30 crc kubenswrapper[4751]: I0227 17:29:30.977029 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjdzr" event={"ID":"52183c85-6349-44f1-b92b-f44921299e07","Type":"ContainerStarted","Data":"0f68813901772eac26ef732807a4955c8d213f3942c17a25acf84868121531e8"} Feb 27 17:29:30 crc kubenswrapper[4751]: I0227 17:29:30.977077 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjdzr" event={"ID":"52183c85-6349-44f1-b92b-f44921299e07","Type":"ContainerStarted","Data":"7bc1d89b0092266d887e48ad362083e289e1d89530c350a923bbb66c9de7311b"} Feb 27 17:29:31 crc kubenswrapper[4751]: I0227 17:29:31.990959 4751 generic.go:334] "Generic (PLEG): container finished" podID="52183c85-6349-44f1-b92b-f44921299e07" containerID="0f68813901772eac26ef732807a4955c8d213f3942c17a25acf84868121531e8" exitCode=0 Feb 27 17:29:31 crc kubenswrapper[4751]: I0227 17:29:31.991075 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjdzr" event={"ID":"52183c85-6349-44f1-b92b-f44921299e07","Type":"ContainerDied","Data":"0f68813901772eac26ef732807a4955c8d213f3942c17a25acf84868121531e8"} Feb 27 17:29:31 crc kubenswrapper[4751]: I0227 17:29:31.994083 4751 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 27 17:29:34 crc kubenswrapper[4751]: I0227 17:29:34.018347 4751 generic.go:334] "Generic (PLEG): container finished" podID="52183c85-6349-44f1-b92b-f44921299e07" containerID="64a96814e248a1e82b923f5a11e2d53f76a81ab5f372d8b56a81058163111b24" exitCode=0 Feb 27 17:29:34 crc kubenswrapper[4751]: I0227 17:29:34.018518 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjdzr" event={"ID":"52183c85-6349-44f1-b92b-f44921299e07","Type":"ContainerDied","Data":"64a96814e248a1e82b923f5a11e2d53f76a81ab5f372d8b56a81058163111b24"} Feb 27 17:29:36 crc kubenswrapper[4751]: I0227 17:29:36.044001 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjdzr" event={"ID":"52183c85-6349-44f1-b92b-f44921299e07","Type":"ContainerStarted","Data":"f51bca370ca987657991c261f46d544987ae4b40b83a49a398be156e4fb33f13"} Feb 27 17:29:36 crc kubenswrapper[4751]: I0227 17:29:36.083853 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pjdzr" podStartSLOduration=4.402524839 podStartE2EDuration="7.083838263s" podCreationTimestamp="2026-02-27 17:29:29 +0000 UTC" firstStartedPulling="2026-02-27 17:29:31.993713953 +0000 UTC m=+3934.140728440" lastFinishedPulling="2026-02-27 17:29:34.675027417 +0000 UTC m=+3936.822041864" observedRunningTime="2026-02-27 17:29:36.075115666 +0000 UTC m=+3938.222130153" watchObservedRunningTime="2026-02-27 17:29:36.083838263 +0000 UTC m=+3938.230852710" Feb 27 17:29:40 crc 
kubenswrapper[4751]: I0227 17:29:40.393813 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pjdzr" Feb 27 17:29:40 crc kubenswrapper[4751]: I0227 17:29:40.394599 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pjdzr" Feb 27 17:29:40 crc kubenswrapper[4751]: I0227 17:29:40.522170 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:29:40 crc kubenswrapper[4751]: E0227 17:29:40.523213 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:29:41 crc kubenswrapper[4751]: I0227 17:29:41.474708 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pjdzr" podUID="52183c85-6349-44f1-b92b-f44921299e07" containerName="registry-server" probeResult="failure" output=< Feb 27 17:29:41 crc kubenswrapper[4751]: timeout: failed to connect service ":50051" within 1s Feb 27 17:29:41 crc kubenswrapper[4751]: > Feb 27 17:29:50 crc kubenswrapper[4751]: I0227 17:29:50.454350 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pjdzr" Feb 27 17:29:50 crc kubenswrapper[4751]: I0227 17:29:50.515605 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pjdzr" Feb 27 17:29:50 crc kubenswrapper[4751]: I0227 17:29:50.706202 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pjdzr"] Feb 27 17:29:51 crc kubenswrapper[4751]: I0227 17:29:51.520568 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:29:51 crc kubenswrapper[4751]: E0227 17:29:51.520847 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:29:52 crc kubenswrapper[4751]: I0227 17:29:52.193939 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-pjdzr" podUID="52183c85-6349-44f1-b92b-f44921299e07" containerName="registry-server" containerID="cri-o://f51bca370ca987657991c261f46d544987ae4b40b83a49a398be156e4fb33f13" gracePeriod=2 Feb 27 17:29:52 crc kubenswrapper[4751]: I0227 17:29:52.661549 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pjdzr" Feb 27 17:29:52 crc kubenswrapper[4751]: I0227 17:29:52.791610 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52183c85-6349-44f1-b92b-f44921299e07-utilities\") pod \"52183c85-6349-44f1-b92b-f44921299e07\" (UID: \"52183c85-6349-44f1-b92b-f44921299e07\") " Feb 27 17:29:52 crc kubenswrapper[4751]: I0227 17:29:52.792432 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52183c85-6349-44f1-b92b-f44921299e07-utilities" (OuterVolumeSpecName: "utilities") pod "52183c85-6349-44f1-b92b-f44921299e07" (UID: "52183c85-6349-44f1-b92b-f44921299e07"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:29:52 crc kubenswrapper[4751]: I0227 17:29:52.792511 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2lwn4\" (UniqueName: \"kubernetes.io/projected/52183c85-6349-44f1-b92b-f44921299e07-kube-api-access-2lwn4\") pod \"52183c85-6349-44f1-b92b-f44921299e07\" (UID: \"52183c85-6349-44f1-b92b-f44921299e07\") " Feb 27 17:29:52 crc kubenswrapper[4751]: I0227 17:29:52.792551 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52183c85-6349-44f1-b92b-f44921299e07-catalog-content\") pod \"52183c85-6349-44f1-b92b-f44921299e07\" (UID: \"52183c85-6349-44f1-b92b-f44921299e07\") " Feb 27 17:29:52 crc kubenswrapper[4751]: I0227 17:29:52.793755 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52183c85-6349-44f1-b92b-f44921299e07-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 17:29:52 crc kubenswrapper[4751]: I0227 17:29:52.801020 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52183c85-6349-44f1-b92b-f44921299e07-kube-api-access-2lwn4" (OuterVolumeSpecName: "kube-api-access-2lwn4") pod "52183c85-6349-44f1-b92b-f44921299e07" (UID: "52183c85-6349-44f1-b92b-f44921299e07"). InnerVolumeSpecName "kube-api-access-2lwn4". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:29:52 crc kubenswrapper[4751]: I0227 17:29:52.895046 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2lwn4\" (UniqueName: \"kubernetes.io/projected/52183c85-6349-44f1-b92b-f44921299e07-kube-api-access-2lwn4\") on node \"crc\" DevicePath \"\"" Feb 27 17:29:52 crc kubenswrapper[4751]: I0227 17:29:52.982678 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52183c85-6349-44f1-b92b-f44921299e07-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "52183c85-6349-44f1-b92b-f44921299e07" (UID: "52183c85-6349-44f1-b92b-f44921299e07"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:29:52 crc kubenswrapper[4751]: I0227 17:29:52.996516 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52183c85-6349-44f1-b92b-f44921299e07-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 17:29:53 crc kubenswrapper[4751]: I0227 17:29:53.208101 4751 generic.go:334] "Generic (PLEG): container finished" podID="52183c85-6349-44f1-b92b-f44921299e07" containerID="f51bca370ca987657991c261f46d544987ae4b40b83a49a398be156e4fb33f13" exitCode=0 Feb 27 17:29:53 crc kubenswrapper[4751]: I0227 17:29:53.208168 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pjdzr" Feb 27 17:29:53 crc kubenswrapper[4751]: I0227 17:29:53.208175 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjdzr" event={"ID":"52183c85-6349-44f1-b92b-f44921299e07","Type":"ContainerDied","Data":"f51bca370ca987657991c261f46d544987ae4b40b83a49a398be156e4fb33f13"} Feb 27 17:29:53 crc kubenswrapper[4751]: I0227 17:29:53.208333 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjdzr" event={"ID":"52183c85-6349-44f1-b92b-f44921299e07","Type":"ContainerDied","Data":"7bc1d89b0092266d887e48ad362083e289e1d89530c350a923bbb66c9de7311b"} Feb 27 17:29:53 crc kubenswrapper[4751]: I0227 17:29:53.208374 4751 scope.go:117] "RemoveContainer" containerID="f51bca370ca987657991c261f46d544987ae4b40b83a49a398be156e4fb33f13" Feb 27 17:29:53 crc kubenswrapper[4751]: I0227 17:29:53.251027 4751 scope.go:117] "RemoveContainer" containerID="64a96814e248a1e82b923f5a11e2d53f76a81ab5f372d8b56a81058163111b24" Feb 27 17:29:53 crc kubenswrapper[4751]: I0227 17:29:53.265468 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pjdzr"] Feb 27 17:29:53 crc kubenswrapper[4751]: I0227 17:29:53.277068 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-pjdzr"] Feb 27 17:29:53 crc kubenswrapper[4751]: I0227 17:29:53.303622 4751 scope.go:117] "RemoveContainer" containerID="0f68813901772eac26ef732807a4955c8d213f3942c17a25acf84868121531e8" Feb 27 17:29:53 crc kubenswrapper[4751]: I0227 17:29:53.322127 4751 scope.go:117] "RemoveContainer" containerID="f51bca370ca987657991c261f46d544987ae4b40b83a49a398be156e4fb33f13" Feb 27 17:29:53 crc kubenswrapper[4751]: E0227 17:29:53.322441 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f51bca370ca987657991c261f46d544987ae4b40b83a49a398be156e4fb33f13\": container with ID starting with f51bca370ca987657991c261f46d544987ae4b40b83a49a398be156e4fb33f13 not found: ID does not exist" containerID="f51bca370ca987657991c261f46d544987ae4b40b83a49a398be156e4fb33f13" Feb 27 17:29:53 crc kubenswrapper[4751]: I0227 17:29:53.322493 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f51bca370ca987657991c261f46d544987ae4b40b83a49a398be156e4fb33f13"} err="failed to get container status \"f51bca370ca987657991c261f46d544987ae4b40b83a49a398be156e4fb33f13\": rpc error: code = NotFound desc = could not find container \"f51bca370ca987657991c261f46d544987ae4b40b83a49a398be156e4fb33f13\": container with ID starting with f51bca370ca987657991c261f46d544987ae4b40b83a49a398be156e4fb33f13 not found: ID does not exist" Feb 27 17:29:53 crc 
kubenswrapper[4751]: I0227 17:29:53.322513 4751 scope.go:117] "RemoveContainer" containerID="64a96814e248a1e82b923f5a11e2d53f76a81ab5f372d8b56a81058163111b24" Feb 27 17:29:53 crc kubenswrapper[4751]: E0227 17:29:53.322947 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64a96814e248a1e82b923f5a11e2d53f76a81ab5f372d8b56a81058163111b24\": container with ID starting with 64a96814e248a1e82b923f5a11e2d53f76a81ab5f372d8b56a81058163111b24 not found: ID does not exist" containerID="64a96814e248a1e82b923f5a11e2d53f76a81ab5f372d8b56a81058163111b24" Feb 27 17:29:53 crc kubenswrapper[4751]: I0227 17:29:53.322979 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64a96814e248a1e82b923f5a11e2d53f76a81ab5f372d8b56a81058163111b24"} err="failed to get container status \"64a96814e248a1e82b923f5a11e2d53f76a81ab5f372d8b56a81058163111b24\": rpc error: code = NotFound desc = could not find container \"64a96814e248a1e82b923f5a11e2d53f76a81ab5f372d8b56a81058163111b24\": container with ID starting with 64a96814e248a1e82b923f5a11e2d53f76a81ab5f372d8b56a81058163111b24 not found: ID does not exist" Feb 27 17:29:53 crc kubenswrapper[4751]: I0227 17:29:53.322995 4751 scope.go:117] "RemoveContainer" containerID="0f68813901772eac26ef732807a4955c8d213f3942c17a25acf84868121531e8" Feb 27 17:29:53 crc kubenswrapper[4751]: E0227 17:29:53.323315 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f68813901772eac26ef732807a4955c8d213f3942c17a25acf84868121531e8\": container with ID starting with 0f68813901772eac26ef732807a4955c8d213f3942c17a25acf84868121531e8 not found: ID does not exist" containerID="0f68813901772eac26ef732807a4955c8d213f3942c17a25acf84868121531e8" Feb 27 17:29:53 crc kubenswrapper[4751]: I0227 17:29:53.323358 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f68813901772eac26ef732807a4955c8d213f3942c17a25acf84868121531e8"} err="failed to get container status \"0f68813901772eac26ef732807a4955c8d213f3942c17a25acf84868121531e8\": rpc error: code = NotFound desc = could not find container \"0f68813901772eac26ef732807a4955c8d213f3942c17a25acf84868121531e8\": container with ID starting with 0f68813901772eac26ef732807a4955c8d213f3942c17a25acf84868121531e8 not found: ID does not exist" Feb 27 17:29:54 crc kubenswrapper[4751]: I0227 17:29:54.535906 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52183c85-6349-44f1-b92b-f44921299e07" path="/var/lib/kubelet/pods/52183c85-6349-44f1-b92b-f44921299e07/volumes" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.191019 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536890-2cgdd"] Feb 27 17:30:00 crc kubenswrapper[4751]: E0227 17:30:00.192122 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52183c85-6349-44f1-b92b-f44921299e07" containerName="extract-utilities" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.192143 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="52183c85-6349-44f1-b92b-f44921299e07" containerName="extract-utilities" Feb 27 17:30:00 crc kubenswrapper[4751]: E0227 17:30:00.192165 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52183c85-6349-44f1-b92b-f44921299e07" containerName="registry-server" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.192177 4751 
state_mem.go:107] "Deleted CPUSet assignment" podUID="52183c85-6349-44f1-b92b-f44921299e07" containerName="registry-server" Feb 27 17:30:00 crc kubenswrapper[4751]: E0227 17:30:00.192191 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52183c85-6349-44f1-b92b-f44921299e07" containerName="extract-content" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.192201 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="52183c85-6349-44f1-b92b-f44921299e07" containerName="extract-content" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.192487 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="52183c85-6349-44f1-b92b-f44921299e07" containerName="registry-server" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.193193 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536890-2cgdd" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.196810 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.197125 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.197416 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.200943 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536890-rzrw8"] Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.202352 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536890-rzrw8" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.205339 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.205369 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.211080 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536890-2cgdd"] Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.229140 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536890-rzrw8"] Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.318055 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d3a3a303-1429-4ea3-aa4c-5700c272d308-secret-volume\") pod \"collect-profiles-29536890-rzrw8\" (UID: \"d3a3a303-1429-4ea3-aa4c-5700c272d308\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536890-rzrw8" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.318130 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frqb7\" (UniqueName: \"kubernetes.io/projected/242d2685-aabe-4c87-b911-8590dd4b9333-kube-api-access-frqb7\") pod \"auto-csr-approver-29536890-2cgdd\" (UID: \"242d2685-aabe-4c87-b911-8590dd4b9333\") " pod="openshift-infra/auto-csr-approver-29536890-2cgdd" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 
17:30:00.318183 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvcdq\" (UniqueName: \"kubernetes.io/projected/d3a3a303-1429-4ea3-aa4c-5700c272d308-kube-api-access-vvcdq\") pod \"collect-profiles-29536890-rzrw8\" (UID: \"d3a3a303-1429-4ea3-aa4c-5700c272d308\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536890-rzrw8" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.318211 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d3a3a303-1429-4ea3-aa4c-5700c272d308-config-volume\") pod \"collect-profiles-29536890-rzrw8\" (UID: \"d3a3a303-1429-4ea3-aa4c-5700c272d308\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536890-rzrw8" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.419979 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvcdq\" (UniqueName: \"kubernetes.io/projected/d3a3a303-1429-4ea3-aa4c-5700c272d308-kube-api-access-vvcdq\") pod \"collect-profiles-29536890-rzrw8\" (UID: \"d3a3a303-1429-4ea3-aa4c-5700c272d308\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536890-rzrw8" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.420036 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d3a3a303-1429-4ea3-aa4c-5700c272d308-config-volume\") pod \"collect-profiles-29536890-rzrw8\" (UID: \"d3a3a303-1429-4ea3-aa4c-5700c272d308\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536890-rzrw8" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.420096 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d3a3a303-1429-4ea3-aa4c-5700c272d308-secret-volume\") pod \"collect-profiles-29536890-rzrw8\" (UID: \"d3a3a303-1429-4ea3-aa4c-5700c272d308\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536890-rzrw8" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.420162 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frqb7\" (UniqueName: \"kubernetes.io/projected/242d2685-aabe-4c87-b911-8590dd4b9333-kube-api-access-frqb7\") pod \"auto-csr-approver-29536890-2cgdd\" (UID: \"242d2685-aabe-4c87-b911-8590dd4b9333\") " pod="openshift-infra/auto-csr-approver-29536890-2cgdd" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.421080 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d3a3a303-1429-4ea3-aa4c-5700c272d308-config-volume\") pod \"collect-profiles-29536890-rzrw8\" (UID: \"d3a3a303-1429-4ea3-aa4c-5700c272d308\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536890-rzrw8" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.441233 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frqb7\" (UniqueName: \"kubernetes.io/projected/242d2685-aabe-4c87-b911-8590dd4b9333-kube-api-access-frqb7\") pod \"auto-csr-approver-29536890-2cgdd\" (UID: \"242d2685-aabe-4c87-b911-8590dd4b9333\") " pod="openshift-infra/auto-csr-approver-29536890-2cgdd" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.441496 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/d3a3a303-1429-4ea3-aa4c-5700c272d308-secret-volume\") pod \"collect-profiles-29536890-rzrw8\" (UID: \"d3a3a303-1429-4ea3-aa4c-5700c272d308\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536890-rzrw8" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.455993 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvcdq\" (UniqueName: \"kubernetes.io/projected/d3a3a303-1429-4ea3-aa4c-5700c272d308-kube-api-access-vvcdq\") pod \"collect-profiles-29536890-rzrw8\" (UID: \"d3a3a303-1429-4ea3-aa4c-5700c272d308\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536890-rzrw8" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.507366 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536890-2cgdd" Feb 27 17:30:00 crc kubenswrapper[4751]: I0227 17:30:00.521381 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536890-rzrw8" Feb 27 17:30:01 crc kubenswrapper[4751]: I0227 17:30:01.025211 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536890-2cgdd"] Feb 27 17:30:01 crc kubenswrapper[4751]: I0227 17:30:01.082472 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536890-rzrw8"] Feb 27 17:30:01 crc kubenswrapper[4751]: W0227 17:30:01.097835 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd3a3a303_1429_4ea3_aa4c_5700c272d308.slice/crio-9c63c14de2946d0cebf071ef3e58551dcb0c76c14768daa2c79bb8151b12b1de WatchSource:0}: Error finding container 9c63c14de2946d0cebf071ef3e58551dcb0c76c14768daa2c79bb8151b12b1de: Status 404 returned error can't find the container with id 9c63c14de2946d0cebf071ef3e58551dcb0c76c14768daa2c79bb8151b12b1de Feb 27 17:30:01 crc kubenswrapper[4751]: I0227 17:30:01.280047 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536890-2cgdd" event={"ID":"242d2685-aabe-4c87-b911-8590dd4b9333","Type":"ContainerStarted","Data":"633b839657a675e76813ee08d24e4d2923cd0f5fcd057f10a462e834b646e120"} Feb 27 17:30:01 crc kubenswrapper[4751]: I0227 17:30:01.282258 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536890-rzrw8" event={"ID":"d3a3a303-1429-4ea3-aa4c-5700c272d308","Type":"ContainerStarted","Data":"9c63c14de2946d0cebf071ef3e58551dcb0c76c14768daa2c79bb8151b12b1de"} Feb 27 17:30:01 crc kubenswrapper[4751]: I0227 17:30:01.313124 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29536890-rzrw8" podStartSLOduration=1.3131016930000001 podStartE2EDuration="1.313101693s" podCreationTimestamp="2026-02-27 17:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 17:30:01.306257805 +0000 UTC m=+3963.453272272" watchObservedRunningTime="2026-02-27 17:30:01.313101693 +0000 UTC m=+3963.460116150" Feb 27 17:30:02 crc kubenswrapper[4751]: I0227 17:30:02.296252 4751 generic.go:334] "Generic (PLEG): container finished" podID="d3a3a303-1429-4ea3-aa4c-5700c272d308" containerID="c4e3db0429d938f8cb0662eead247c3f3b589d9503ae1d3a1b23bcafb1583630" exitCode=0 Feb 27 17:30:02 crc 
kubenswrapper[4751]: I0227 17:30:02.296342 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536890-rzrw8" event={"ID":"d3a3a303-1429-4ea3-aa4c-5700c272d308","Type":"ContainerDied","Data":"c4e3db0429d938f8cb0662eead247c3f3b589d9503ae1d3a1b23bcafb1583630"} Feb 27 17:30:02 crc kubenswrapper[4751]: I0227 17:30:02.521119 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:30:02 crc kubenswrapper[4751]: E0227 17:30:02.521525 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:30:03 crc kubenswrapper[4751]: I0227 17:30:03.326903 4751 generic.go:334] "Generic (PLEG): container finished" podID="242d2685-aabe-4c87-b911-8590dd4b9333" containerID="b7ae1b30a8cd1b186961247c15923d3acaeadc5d4f8ad67e9db921ab9893636f" exitCode=0 Feb 27 17:30:03 crc kubenswrapper[4751]: I0227 17:30:03.327158 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536890-2cgdd" event={"ID":"242d2685-aabe-4c87-b911-8590dd4b9333","Type":"ContainerDied","Data":"b7ae1b30a8cd1b186961247c15923d3acaeadc5d4f8ad67e9db921ab9893636f"} Feb 27 17:30:03 crc kubenswrapper[4751]: I0227 17:30:03.731784 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536890-rzrw8" Feb 27 17:30:03 crc kubenswrapper[4751]: I0227 17:30:03.776648 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vvcdq\" (UniqueName: \"kubernetes.io/projected/d3a3a303-1429-4ea3-aa4c-5700c272d308-kube-api-access-vvcdq\") pod \"d3a3a303-1429-4ea3-aa4c-5700c272d308\" (UID: \"d3a3a303-1429-4ea3-aa4c-5700c272d308\") " Feb 27 17:30:03 crc kubenswrapper[4751]: I0227 17:30:03.776738 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d3a3a303-1429-4ea3-aa4c-5700c272d308-config-volume\") pod \"d3a3a303-1429-4ea3-aa4c-5700c272d308\" (UID: \"d3a3a303-1429-4ea3-aa4c-5700c272d308\") " Feb 27 17:30:03 crc kubenswrapper[4751]: I0227 17:30:03.776790 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d3a3a303-1429-4ea3-aa4c-5700c272d308-secret-volume\") pod \"d3a3a303-1429-4ea3-aa4c-5700c272d308\" (UID: \"d3a3a303-1429-4ea3-aa4c-5700c272d308\") " Feb 27 17:30:03 crc kubenswrapper[4751]: I0227 17:30:03.778092 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3a3a303-1429-4ea3-aa4c-5700c272d308-config-volume" (OuterVolumeSpecName: "config-volume") pod "d3a3a303-1429-4ea3-aa4c-5700c272d308" (UID: "d3a3a303-1429-4ea3-aa4c-5700c272d308"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 17:30:03 crc kubenswrapper[4751]: I0227 17:30:03.786636 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3a3a303-1429-4ea3-aa4c-5700c272d308-kube-api-access-vvcdq" (OuterVolumeSpecName: "kube-api-access-vvcdq") pod "d3a3a303-1429-4ea3-aa4c-5700c272d308" (UID: "d3a3a303-1429-4ea3-aa4c-5700c272d308"). InnerVolumeSpecName "kube-api-access-vvcdq". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:30:03 crc kubenswrapper[4751]: I0227 17:30:03.786741 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3a3a303-1429-4ea3-aa4c-5700c272d308-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d3a3a303-1429-4ea3-aa4c-5700c272d308" (UID: "d3a3a303-1429-4ea3-aa4c-5700c272d308"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 17:30:03 crc kubenswrapper[4751]: I0227 17:30:03.878764 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vvcdq\" (UniqueName: \"kubernetes.io/projected/d3a3a303-1429-4ea3-aa4c-5700c272d308-kube-api-access-vvcdq\") on node \"crc\" DevicePath \"\"" Feb 27 17:30:03 crc kubenswrapper[4751]: I0227 17:30:03.878826 4751 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d3a3a303-1429-4ea3-aa4c-5700c272d308-config-volume\") on node \"crc\" DevicePath \"\"" Feb 27 17:30:03 crc kubenswrapper[4751]: I0227 17:30:03.878847 4751 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d3a3a303-1429-4ea3-aa4c-5700c272d308-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 27 17:30:04 crc kubenswrapper[4751]: I0227 17:30:04.345687 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536890-rzrw8" event={"ID":"d3a3a303-1429-4ea3-aa4c-5700c272d308","Type":"ContainerDied","Data":"9c63c14de2946d0cebf071ef3e58551dcb0c76c14768daa2c79bb8151b12b1de"} Feb 27 17:30:04 crc kubenswrapper[4751]: I0227 17:30:04.346665 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c63c14de2946d0cebf071ef3e58551dcb0c76c14768daa2c79bb8151b12b1de" Feb 27 17:30:04 crc kubenswrapper[4751]: I0227 17:30:04.345728 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536890-rzrw8" Feb 27 17:30:04 crc kubenswrapper[4751]: I0227 17:30:04.414162 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536845-gfnmn"] Feb 27 17:30:04 crc kubenswrapper[4751]: I0227 17:30:04.420584 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536845-gfnmn"] Feb 27 17:30:04 crc kubenswrapper[4751]: I0227 17:30:04.557323 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68ad3b27-f8c4-44fe-8c10-215cf6821391" path="/var/lib/kubelet/pods/68ad3b27-f8c4-44fe-8c10-215cf6821391/volumes" Feb 27 17:30:04 crc kubenswrapper[4751]: I0227 17:30:04.734548 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536890-2cgdd" Feb 27 17:30:04 crc kubenswrapper[4751]: I0227 17:30:04.893282 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-frqb7\" (UniqueName: \"kubernetes.io/projected/242d2685-aabe-4c87-b911-8590dd4b9333-kube-api-access-frqb7\") pod \"242d2685-aabe-4c87-b911-8590dd4b9333\" (UID: \"242d2685-aabe-4c87-b911-8590dd4b9333\") " Feb 27 17:30:04 crc kubenswrapper[4751]: I0227 17:30:04.899663 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/242d2685-aabe-4c87-b911-8590dd4b9333-kube-api-access-frqb7" (OuterVolumeSpecName: "kube-api-access-frqb7") pod "242d2685-aabe-4c87-b911-8590dd4b9333" (UID: "242d2685-aabe-4c87-b911-8590dd4b9333"). InnerVolumeSpecName "kube-api-access-frqb7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:30:04 crc kubenswrapper[4751]: I0227 17:30:04.994081 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-frqb7\" (UniqueName: \"kubernetes.io/projected/242d2685-aabe-4c87-b911-8590dd4b9333-kube-api-access-frqb7\") on node \"crc\" DevicePath \"\"" Feb 27 17:30:05 crc kubenswrapper[4751]: I0227 17:30:05.354297 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536890-2cgdd" event={"ID":"242d2685-aabe-4c87-b911-8590dd4b9333","Type":"ContainerDied","Data":"633b839657a675e76813ee08d24e4d2923cd0f5fcd057f10a462e834b646e120"} Feb 27 17:30:05 crc kubenswrapper[4751]: I0227 17:30:05.354377 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536890-2cgdd" Feb 27 17:30:05 crc kubenswrapper[4751]: I0227 17:30:05.354382 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="633b839657a675e76813ee08d24e4d2923cd0f5fcd057f10a462e834b646e120" Feb 27 17:30:05 crc kubenswrapper[4751]: I0227 17:30:05.815033 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536884-4q8r5"] Feb 27 17:30:05 crc kubenswrapper[4751]: I0227 17:30:05.824887 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536884-4q8r5"] Feb 27 17:30:06 crc kubenswrapper[4751]: I0227 17:30:06.537973 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34401835-367b-47f6-b11d-f546ff10e459" path="/var/lib/kubelet/pods/34401835-367b-47f6-b11d-f546ff10e459/volumes" Feb 27 17:30:15 crc kubenswrapper[4751]: I0227 17:30:15.520897 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:30:15 crc kubenswrapper[4751]: E0227 17:30:15.521585 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:30:28 crc kubenswrapper[4751]: I0227 17:30:28.528277 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:30:28 crc kubenswrapper[4751]: E0227 17:30:28.529935 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:30:40 crc kubenswrapper[4751]: I0227 17:30:40.520557 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:30:40 crc kubenswrapper[4751]: E0227 17:30:40.521179 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:30:40 crc kubenswrapper[4751]: I0227 17:30:40.937757 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vwxrg"] Feb 27 17:30:40 crc kubenswrapper[4751]: E0227 17:30:40.938456 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="242d2685-aabe-4c87-b911-8590dd4b9333" containerName="oc" Feb 27 17:30:40 crc kubenswrapper[4751]: I0227 17:30:40.938476 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="242d2685-aabe-4c87-b911-8590dd4b9333" containerName="oc" Feb 27 17:30:40 crc kubenswrapper[4751]: E0227 17:30:40.938514 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3a3a303-1429-4ea3-aa4c-5700c272d308" containerName="collect-profiles" Feb 27 17:30:40 crc kubenswrapper[4751]: I0227 17:30:40.938524 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3a3a303-1429-4ea3-aa4c-5700c272d308" containerName="collect-profiles" Feb 27 17:30:40 crc kubenswrapper[4751]: I0227 17:30:40.938676 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3a3a303-1429-4ea3-aa4c-5700c272d308" containerName="collect-profiles" Feb 27 17:30:40 crc kubenswrapper[4751]: I0227 17:30:40.938704 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="242d2685-aabe-4c87-b911-8590dd4b9333" containerName="oc" Feb 27 17:30:40 crc kubenswrapper[4751]: I0227 17:30:40.939940 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vwxrg" Feb 27 17:30:40 crc kubenswrapper[4751]: I0227 17:30:40.946670 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vwxrg"] Feb 27 17:30:41 crc kubenswrapper[4751]: I0227 17:30:41.072180 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fbf4483-e292-4992-bbcc-6ded9f37ff2c-catalog-content\") pod \"certified-operators-vwxrg\" (UID: \"6fbf4483-e292-4992-bbcc-6ded9f37ff2c\") " pod="openshift-marketplace/certified-operators-vwxrg" Feb 27 17:30:41 crc kubenswrapper[4751]: I0227 17:30:41.072230 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brklc\" (UniqueName: \"kubernetes.io/projected/6fbf4483-e292-4992-bbcc-6ded9f37ff2c-kube-api-access-brklc\") pod \"certified-operators-vwxrg\" (UID: \"6fbf4483-e292-4992-bbcc-6ded9f37ff2c\") " pod="openshift-marketplace/certified-operators-vwxrg" Feb 27 17:30:41 crc kubenswrapper[4751]: I0227 17:30:41.072305 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fbf4483-e292-4992-bbcc-6ded9f37ff2c-utilities\") pod \"certified-operators-vwxrg\" (UID: \"6fbf4483-e292-4992-bbcc-6ded9f37ff2c\") " pod="openshift-marketplace/certified-operators-vwxrg" Feb 27 17:30:41 crc kubenswrapper[4751]: I0227 17:30:41.173706 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brklc\" (UniqueName: \"kubernetes.io/projected/6fbf4483-e292-4992-bbcc-6ded9f37ff2c-kube-api-access-brklc\") pod \"certified-operators-vwxrg\" (UID: \"6fbf4483-e292-4992-bbcc-6ded9f37ff2c\") " pod="openshift-marketplace/certified-operators-vwxrg" Feb 27 17:30:41 crc kubenswrapper[4751]: I0227 17:30:41.173795 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fbf4483-e292-4992-bbcc-6ded9f37ff2c-utilities\") pod \"certified-operators-vwxrg\" (UID: \"6fbf4483-e292-4992-bbcc-6ded9f37ff2c\") " pod="openshift-marketplace/certified-operators-vwxrg" Feb 27 17:30:41 crc kubenswrapper[4751]: I0227 17:30:41.173866 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fbf4483-e292-4992-bbcc-6ded9f37ff2c-catalog-content\") pod \"certified-operators-vwxrg\" (UID: \"6fbf4483-e292-4992-bbcc-6ded9f37ff2c\") " pod="openshift-marketplace/certified-operators-vwxrg" Feb 27 17:30:41 crc kubenswrapper[4751]: I0227 17:30:41.174324 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fbf4483-e292-4992-bbcc-6ded9f37ff2c-utilities\") pod \"certified-operators-vwxrg\" (UID: \"6fbf4483-e292-4992-bbcc-6ded9f37ff2c\") " pod="openshift-marketplace/certified-operators-vwxrg" Feb 27 17:30:41 crc kubenswrapper[4751]: I0227 17:30:41.174559 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fbf4483-e292-4992-bbcc-6ded9f37ff2c-catalog-content\") pod \"certified-operators-vwxrg\" (UID: \"6fbf4483-e292-4992-bbcc-6ded9f37ff2c\") " pod="openshift-marketplace/certified-operators-vwxrg" Feb 27 17:30:41 crc kubenswrapper[4751]: I0227 17:30:41.194513 4751 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-brklc\" (UniqueName: \"kubernetes.io/projected/6fbf4483-e292-4992-bbcc-6ded9f37ff2c-kube-api-access-brklc\") pod \"certified-operators-vwxrg\" (UID: \"6fbf4483-e292-4992-bbcc-6ded9f37ff2c\") " pod="openshift-marketplace/certified-operators-vwxrg" Feb 27 17:30:41 crc kubenswrapper[4751]: I0227 17:30:41.264168 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vwxrg" Feb 27 17:30:41 crc kubenswrapper[4751]: I0227 17:30:41.723793 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vwxrg"] Feb 27 17:30:42 crc kubenswrapper[4751]: I0227 17:30:42.691315 4751 generic.go:334] "Generic (PLEG): container finished" podID="6fbf4483-e292-4992-bbcc-6ded9f37ff2c" containerID="a4ce3250d36ff9dbd97b8386b2bec65c2e771c3fb6b736a5378684d0cbefecd3" exitCode=0 Feb 27 17:30:42 crc kubenswrapper[4751]: I0227 17:30:42.691462 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vwxrg" event={"ID":"6fbf4483-e292-4992-bbcc-6ded9f37ff2c","Type":"ContainerDied","Data":"a4ce3250d36ff9dbd97b8386b2bec65c2e771c3fb6b736a5378684d0cbefecd3"} Feb 27 17:30:42 crc kubenswrapper[4751]: I0227 17:30:42.691777 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vwxrg" event={"ID":"6fbf4483-e292-4992-bbcc-6ded9f37ff2c","Type":"ContainerStarted","Data":"94f076329f202ff7d3ca0f057746c0d6cab5730f92417676f4464fb9673464e7"} Feb 27 17:30:44 crc kubenswrapper[4751]: I0227 17:30:44.711271 4751 generic.go:334] "Generic (PLEG): container finished" podID="6fbf4483-e292-4992-bbcc-6ded9f37ff2c" containerID="a48ace1c223962478193fba6a15141b0a610ea7e460fb4e8ae849fdc29645672" exitCode=0 Feb 27 17:30:44 crc kubenswrapper[4751]: I0227 17:30:44.711333 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vwxrg" event={"ID":"6fbf4483-e292-4992-bbcc-6ded9f37ff2c","Type":"ContainerDied","Data":"a48ace1c223962478193fba6a15141b0a610ea7e460fb4e8ae849fdc29645672"} Feb 27 17:30:45 crc kubenswrapper[4751]: I0227 17:30:45.721067 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vwxrg" event={"ID":"6fbf4483-e292-4992-bbcc-6ded9f37ff2c","Type":"ContainerStarted","Data":"aeb9aef78d7a49db9de661de1c5d977c4fe89b5812c2372ecafbb327e33a19d1"} Feb 27 17:30:45 crc kubenswrapper[4751]: I0227 17:30:45.751206 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vwxrg" podStartSLOduration=3.316393812 podStartE2EDuration="5.75118867s" podCreationTimestamp="2026-02-27 17:30:40 +0000 UTC" firstStartedPulling="2026-02-27 17:30:42.694084154 +0000 UTC m=+4004.841098611" lastFinishedPulling="2026-02-27 17:30:45.128879012 +0000 UTC m=+4007.275893469" observedRunningTime="2026-02-27 17:30:45.74847277 +0000 UTC m=+4007.895487217" watchObservedRunningTime="2026-02-27 17:30:45.75118867 +0000 UTC m=+4007.898203117" Feb 27 17:30:50 crc kubenswrapper[4751]: I0227 17:30:50.759692 4751 scope.go:117] "RemoveContainer" containerID="fa6b8875e6a1bc0ec06478967be8e1866c347ad855bc60a5b73645b69d51a2e6" Feb 27 17:30:50 crc kubenswrapper[4751]: I0227 17:30:50.793210 4751 scope.go:117] "RemoveContainer" containerID="f49b0d75cfe7a9513ce1bd7b42ff4af9efa4e301002e3adbf6d201f9514a31bd" Feb 27 17:30:50 crc kubenswrapper[4751]: I0227 17:30:50.931751 4751 scope.go:117] 
"RemoveContainer" containerID="98b9ffa2c36f40f3acfbd70378c91f00c9a0241141446b73950c445770355e21" Feb 27 17:30:51 crc kubenswrapper[4751]: I0227 17:30:51.264756 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vwxrg" Feb 27 17:30:51 crc kubenswrapper[4751]: I0227 17:30:51.264843 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vwxrg" Feb 27 17:30:51 crc kubenswrapper[4751]: I0227 17:30:51.345094 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vwxrg" Feb 27 17:30:51 crc kubenswrapper[4751]: I0227 17:30:51.863397 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vwxrg" Feb 27 17:30:51 crc kubenswrapper[4751]: I0227 17:30:51.935112 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vwxrg"] Feb 27 17:30:53 crc kubenswrapper[4751]: I0227 17:30:53.520691 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:30:53 crc kubenswrapper[4751]: E0227 17:30:53.521375 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:30:53 crc kubenswrapper[4751]: I0227 17:30:53.805622 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-vwxrg" podUID="6fbf4483-e292-4992-bbcc-6ded9f37ff2c" containerName="registry-server" containerID="cri-o://aeb9aef78d7a49db9de661de1c5d977c4fe89b5812c2372ecafbb327e33a19d1" gracePeriod=2 Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.364771 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vwxrg" Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.477387 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fbf4483-e292-4992-bbcc-6ded9f37ff2c-catalog-content\") pod \"6fbf4483-e292-4992-bbcc-6ded9f37ff2c\" (UID: \"6fbf4483-e292-4992-bbcc-6ded9f37ff2c\") " Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.477519 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fbf4483-e292-4992-bbcc-6ded9f37ff2c-utilities\") pod \"6fbf4483-e292-4992-bbcc-6ded9f37ff2c\" (UID: \"6fbf4483-e292-4992-bbcc-6ded9f37ff2c\") " Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.477669 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-brklc\" (UniqueName: \"kubernetes.io/projected/6fbf4483-e292-4992-bbcc-6ded9f37ff2c-kube-api-access-brklc\") pod \"6fbf4483-e292-4992-bbcc-6ded9f37ff2c\" (UID: \"6fbf4483-e292-4992-bbcc-6ded9f37ff2c\") " Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.478683 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fbf4483-e292-4992-bbcc-6ded9f37ff2c-utilities" (OuterVolumeSpecName: "utilities") pod "6fbf4483-e292-4992-bbcc-6ded9f37ff2c" (UID: "6fbf4483-e292-4992-bbcc-6ded9f37ff2c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.484722 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fbf4483-e292-4992-bbcc-6ded9f37ff2c-kube-api-access-brklc" (OuterVolumeSpecName: "kube-api-access-brklc") pod "6fbf4483-e292-4992-bbcc-6ded9f37ff2c" (UID: "6fbf4483-e292-4992-bbcc-6ded9f37ff2c"). InnerVolumeSpecName "kube-api-access-brklc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.579549 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-brklc\" (UniqueName: \"kubernetes.io/projected/6fbf4483-e292-4992-bbcc-6ded9f37ff2c-kube-api-access-brklc\") on node \"crc\" DevicePath \"\"" Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.579586 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fbf4483-e292-4992-bbcc-6ded9f37ff2c-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.603847 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fbf4483-e292-4992-bbcc-6ded9f37ff2c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6fbf4483-e292-4992-bbcc-6ded9f37ff2c" (UID: "6fbf4483-e292-4992-bbcc-6ded9f37ff2c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.681178 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fbf4483-e292-4992-bbcc-6ded9f37ff2c-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.817580 4751 generic.go:334] "Generic (PLEG): container finished" podID="6fbf4483-e292-4992-bbcc-6ded9f37ff2c" containerID="aeb9aef78d7a49db9de661de1c5d977c4fe89b5812c2372ecafbb327e33a19d1" exitCode=0 Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.817666 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vwxrg" event={"ID":"6fbf4483-e292-4992-bbcc-6ded9f37ff2c","Type":"ContainerDied","Data":"aeb9aef78d7a49db9de661de1c5d977c4fe89b5812c2372ecafbb327e33a19d1"} Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.817712 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vwxrg" Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.818118 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vwxrg" event={"ID":"6fbf4483-e292-4992-bbcc-6ded9f37ff2c","Type":"ContainerDied","Data":"94f076329f202ff7d3ca0f057746c0d6cab5730f92417676f4464fb9673464e7"} Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.818349 4751 scope.go:117] "RemoveContainer" containerID="aeb9aef78d7a49db9de661de1c5d977c4fe89b5812c2372ecafbb327e33a19d1" Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.844243 4751 scope.go:117] "RemoveContainer" containerID="a48ace1c223962478193fba6a15141b0a610ea7e460fb4e8ae849fdc29645672" Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.869577 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vwxrg"] Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.876180 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-vwxrg"] Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.888659 4751 scope.go:117] "RemoveContainer" containerID="a4ce3250d36ff9dbd97b8386b2bec65c2e771c3fb6b736a5378684d0cbefecd3" Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.917506 4751 scope.go:117] "RemoveContainer" containerID="aeb9aef78d7a49db9de661de1c5d977c4fe89b5812c2372ecafbb327e33a19d1" Feb 27 17:30:54 crc kubenswrapper[4751]: E0227 17:30:54.918442 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aeb9aef78d7a49db9de661de1c5d977c4fe89b5812c2372ecafbb327e33a19d1\": container with ID starting with aeb9aef78d7a49db9de661de1c5d977c4fe89b5812c2372ecafbb327e33a19d1 not found: ID does not exist" containerID="aeb9aef78d7a49db9de661de1c5d977c4fe89b5812c2372ecafbb327e33a19d1" Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.918501 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aeb9aef78d7a49db9de661de1c5d977c4fe89b5812c2372ecafbb327e33a19d1"} err="failed to get container status \"aeb9aef78d7a49db9de661de1c5d977c4fe89b5812c2372ecafbb327e33a19d1\": rpc error: code = NotFound desc = could not find container \"aeb9aef78d7a49db9de661de1c5d977c4fe89b5812c2372ecafbb327e33a19d1\": container with ID starting with aeb9aef78d7a49db9de661de1c5d977c4fe89b5812c2372ecafbb327e33a19d1 not found: ID does not exist" Feb 27 
17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.918653 4751 scope.go:117] "RemoveContainer" containerID="a48ace1c223962478193fba6a15141b0a610ea7e460fb4e8ae849fdc29645672" Feb 27 17:30:54 crc kubenswrapper[4751]: E0227 17:30:54.919688 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a48ace1c223962478193fba6a15141b0a610ea7e460fb4e8ae849fdc29645672\": container with ID starting with a48ace1c223962478193fba6a15141b0a610ea7e460fb4e8ae849fdc29645672 not found: ID does not exist" containerID="a48ace1c223962478193fba6a15141b0a610ea7e460fb4e8ae849fdc29645672" Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.919750 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a48ace1c223962478193fba6a15141b0a610ea7e460fb4e8ae849fdc29645672"} err="failed to get container status \"a48ace1c223962478193fba6a15141b0a610ea7e460fb4e8ae849fdc29645672\": rpc error: code = NotFound desc = could not find container \"a48ace1c223962478193fba6a15141b0a610ea7e460fb4e8ae849fdc29645672\": container with ID starting with a48ace1c223962478193fba6a15141b0a610ea7e460fb4e8ae849fdc29645672 not found: ID does not exist" Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.919787 4751 scope.go:117] "RemoveContainer" containerID="a4ce3250d36ff9dbd97b8386b2bec65c2e771c3fb6b736a5378684d0cbefecd3" Feb 27 17:30:54 crc kubenswrapper[4751]: E0227 17:30:54.920805 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4ce3250d36ff9dbd97b8386b2bec65c2e771c3fb6b736a5378684d0cbefecd3\": container with ID starting with a4ce3250d36ff9dbd97b8386b2bec65c2e771c3fb6b736a5378684d0cbefecd3 not found: ID does not exist" containerID="a4ce3250d36ff9dbd97b8386b2bec65c2e771c3fb6b736a5378684d0cbefecd3" Feb 27 17:30:54 crc kubenswrapper[4751]: I0227 17:30:54.920839 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4ce3250d36ff9dbd97b8386b2bec65c2e771c3fb6b736a5378684d0cbefecd3"} err="failed to get container status \"a4ce3250d36ff9dbd97b8386b2bec65c2e771c3fb6b736a5378684d0cbefecd3\": rpc error: code = NotFound desc = could not find container \"a4ce3250d36ff9dbd97b8386b2bec65c2e771c3fb6b736a5378684d0cbefecd3\": container with ID starting with a4ce3250d36ff9dbd97b8386b2bec65c2e771c3fb6b736a5378684d0cbefecd3 not found: ID does not exist" Feb 27 17:30:56 crc kubenswrapper[4751]: I0227 17:30:56.527488 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6fbf4483-e292-4992-bbcc-6ded9f37ff2c" path="/var/lib/kubelet/pods/6fbf4483-e292-4992-bbcc-6ded9f37ff2c/volumes" Feb 27 17:31:08 crc kubenswrapper[4751]: I0227 17:31:08.527630 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:31:08 crc kubenswrapper[4751]: E0227 17:31:08.528626 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:31:21 crc kubenswrapper[4751]: I0227 17:31:21.520941 4751 scope.go:117] "RemoveContainer" 
containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:31:21 crc kubenswrapper[4751]: E0227 17:31:21.521974 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:31:34 crc kubenswrapper[4751]: I0227 17:31:34.521216 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:31:34 crc kubenswrapper[4751]: E0227 17:31:34.522498 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:31:45 crc kubenswrapper[4751]: I0227 17:31:45.520662 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:31:45 crc kubenswrapper[4751]: E0227 17:31:45.522102 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:31:51 crc kubenswrapper[4751]: I0227 17:31:51.036214 4751 scope.go:117] "RemoveContainer" containerID="8a52f3336053964477d1bf05c557981322b028180ab67e7f11a404dfdee1c8fb" Feb 27 17:31:51 crc kubenswrapper[4751]: I0227 17:31:51.098311 4751 scope.go:117] "RemoveContainer" containerID="fcacef5496eef0996e6f7cc1b790401d6722dbf389535c9ebe5b72df37f02ef4" Feb 27 17:31:57 crc kubenswrapper[4751]: I0227 17:31:57.522257 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:31:57 crc kubenswrapper[4751]: E0227 17:31:57.525326 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:32:00 crc kubenswrapper[4751]: I0227 17:32:00.153837 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536892-vxgb4"] Feb 27 17:32:00 crc kubenswrapper[4751]: E0227 17:32:00.154630 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fbf4483-e292-4992-bbcc-6ded9f37ff2c" containerName="extract-utilities" Feb 27 17:32:00 crc kubenswrapper[4751]: I0227 17:32:00.154651 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fbf4483-e292-4992-bbcc-6ded9f37ff2c" containerName="extract-utilities" Feb 27 17:32:00 crc 
kubenswrapper[4751]: E0227 17:32:00.154684 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fbf4483-e292-4992-bbcc-6ded9f37ff2c" containerName="registry-server" Feb 27 17:32:00 crc kubenswrapper[4751]: I0227 17:32:00.154695 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fbf4483-e292-4992-bbcc-6ded9f37ff2c" containerName="registry-server" Feb 27 17:32:00 crc kubenswrapper[4751]: E0227 17:32:00.154713 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fbf4483-e292-4992-bbcc-6ded9f37ff2c" containerName="extract-content" Feb 27 17:32:00 crc kubenswrapper[4751]: I0227 17:32:00.154725 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fbf4483-e292-4992-bbcc-6ded9f37ff2c" containerName="extract-content" Feb 27 17:32:00 crc kubenswrapper[4751]: I0227 17:32:00.154990 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fbf4483-e292-4992-bbcc-6ded9f37ff2c" containerName="registry-server" Feb 27 17:32:00 crc kubenswrapper[4751]: I0227 17:32:00.155687 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536892-vxgb4" Feb 27 17:32:00 crc kubenswrapper[4751]: I0227 17:32:00.164932 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536892-vxgb4"] Feb 27 17:32:00 crc kubenswrapper[4751]: I0227 17:32:00.166266 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:32:00 crc kubenswrapper[4751]: I0227 17:32:00.166308 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:32:00 crc kubenswrapper[4751]: I0227 17:32:00.167199 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:32:00 crc kubenswrapper[4751]: I0227 17:32:00.171459 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-md6gl\" (UniqueName: \"kubernetes.io/projected/ac429ae1-d578-4812-8334-d7a7c1cf395a-kube-api-access-md6gl\") pod \"auto-csr-approver-29536892-vxgb4\" (UID: \"ac429ae1-d578-4812-8334-d7a7c1cf395a\") " pod="openshift-infra/auto-csr-approver-29536892-vxgb4" Feb 27 17:32:00 crc kubenswrapper[4751]: I0227 17:32:00.272810 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-md6gl\" (UniqueName: \"kubernetes.io/projected/ac429ae1-d578-4812-8334-d7a7c1cf395a-kube-api-access-md6gl\") pod \"auto-csr-approver-29536892-vxgb4\" (UID: \"ac429ae1-d578-4812-8334-d7a7c1cf395a\") " pod="openshift-infra/auto-csr-approver-29536892-vxgb4" Feb 27 17:32:00 crc kubenswrapper[4751]: I0227 17:32:00.301253 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-md6gl\" (UniqueName: \"kubernetes.io/projected/ac429ae1-d578-4812-8334-d7a7c1cf395a-kube-api-access-md6gl\") pod \"auto-csr-approver-29536892-vxgb4\" (UID: \"ac429ae1-d578-4812-8334-d7a7c1cf395a\") " pod="openshift-infra/auto-csr-approver-29536892-vxgb4" Feb 27 17:32:00 crc kubenswrapper[4751]: I0227 17:32:00.479992 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536892-vxgb4" Feb 27 17:32:00 crc kubenswrapper[4751]: I0227 17:32:00.925300 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536892-vxgb4"] Feb 27 17:32:01 crc kubenswrapper[4751]: I0227 17:32:01.416812 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536892-vxgb4" event={"ID":"ac429ae1-d578-4812-8334-d7a7c1cf395a","Type":"ContainerStarted","Data":"fefc8c91bec475f909bb6bb288432aafe2f9af4eeb793203109a09c67e414b54"} Feb 27 17:32:02 crc kubenswrapper[4751]: I0227 17:32:02.427581 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536892-vxgb4" event={"ID":"ac429ae1-d578-4812-8334-d7a7c1cf395a","Type":"ContainerStarted","Data":"4e7db06f26240c17c03072f1f79f2f90dd5dfd91a27cd71d717cdde158c87d93"} Feb 27 17:32:02 crc kubenswrapper[4751]: I0227 17:32:02.447214 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-infra/auto-csr-approver-29536892-vxgb4" podStartSLOduration=1.495060071 podStartE2EDuration="2.44719171s" podCreationTimestamp="2026-02-27 17:32:00 +0000 UTC" firstStartedPulling="2026-02-27 17:32:00.943778541 +0000 UTC m=+4083.090793028" lastFinishedPulling="2026-02-27 17:32:01.89591019 +0000 UTC m=+4084.042924667" observedRunningTime="2026-02-27 17:32:02.441287041 +0000 UTC m=+4084.588301538" watchObservedRunningTime="2026-02-27 17:32:02.44719171 +0000 UTC m=+4084.594206167" Feb 27 17:32:03 crc kubenswrapper[4751]: I0227 17:32:03.444225 4751 generic.go:334] "Generic (PLEG): container finished" podID="ac429ae1-d578-4812-8334-d7a7c1cf395a" containerID="4e7db06f26240c17c03072f1f79f2f90dd5dfd91a27cd71d717cdde158c87d93" exitCode=0 Feb 27 17:32:03 crc kubenswrapper[4751]: I0227 17:32:03.444297 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536892-vxgb4" event={"ID":"ac429ae1-d578-4812-8334-d7a7c1cf395a","Type":"ContainerDied","Data":"4e7db06f26240c17c03072f1f79f2f90dd5dfd91a27cd71d717cdde158c87d93"} Feb 27 17:32:04 crc kubenswrapper[4751]: I0227 17:32:04.894903 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536892-vxgb4" Feb 27 17:32:05 crc kubenswrapper[4751]: I0227 17:32:05.051565 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-md6gl\" (UniqueName: \"kubernetes.io/projected/ac429ae1-d578-4812-8334-d7a7c1cf395a-kube-api-access-md6gl\") pod \"ac429ae1-d578-4812-8334-d7a7c1cf395a\" (UID: \"ac429ae1-d578-4812-8334-d7a7c1cf395a\") " Feb 27 17:32:05 crc kubenswrapper[4751]: I0227 17:32:05.062678 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac429ae1-d578-4812-8334-d7a7c1cf395a-kube-api-access-md6gl" (OuterVolumeSpecName: "kube-api-access-md6gl") pod "ac429ae1-d578-4812-8334-d7a7c1cf395a" (UID: "ac429ae1-d578-4812-8334-d7a7c1cf395a"). InnerVolumeSpecName "kube-api-access-md6gl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:32:05 crc kubenswrapper[4751]: I0227 17:32:05.153793 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-md6gl\" (UniqueName: \"kubernetes.io/projected/ac429ae1-d578-4812-8334-d7a7c1cf395a-kube-api-access-md6gl\") on node \"crc\" DevicePath \"\"" Feb 27 17:32:05 crc kubenswrapper[4751]: I0227 17:32:05.462855 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536892-vxgb4" event={"ID":"ac429ae1-d578-4812-8334-d7a7c1cf395a","Type":"ContainerDied","Data":"fefc8c91bec475f909bb6bb288432aafe2f9af4eeb793203109a09c67e414b54"} Feb 27 17:32:05 crc kubenswrapper[4751]: I0227 17:32:05.462894 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fefc8c91bec475f909bb6bb288432aafe2f9af4eeb793203109a09c67e414b54" Feb 27 17:32:05 crc kubenswrapper[4751]: I0227 17:32:05.462945 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536892-vxgb4" Feb 27 17:32:05 crc kubenswrapper[4751]: I0227 17:32:05.533048 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536886-c5hgd"] Feb 27 17:32:05 crc kubenswrapper[4751]: I0227 17:32:05.542155 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536886-c5hgd"] Feb 27 17:32:06 crc kubenswrapper[4751]: I0227 17:32:06.531509 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6247c33-9a8a-404b-aeb6-01b6296624c2" path="/var/lib/kubelet/pods/c6247c33-9a8a-404b-aeb6-01b6296624c2/volumes" Feb 27 17:32:08 crc kubenswrapper[4751]: I0227 17:32:08.528393 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:32:08 crc kubenswrapper[4751]: E0227 17:32:08.529040 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:32:19 crc kubenswrapper[4751]: I0227 17:32:19.520832 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:32:19 crc kubenswrapper[4751]: E0227 17:32:19.523273 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:32:34 crc kubenswrapper[4751]: I0227 17:32:34.522528 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:32:34 crc kubenswrapper[4751]: E0227 17:32:34.523774 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:32:49 crc kubenswrapper[4751]: I0227 17:32:49.520959 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:32:49 crc kubenswrapper[4751]: E0227 17:32:49.521548 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:32:51 crc kubenswrapper[4751]: I0227 17:32:51.172931 4751 scope.go:117] "RemoveContainer" containerID="a57abc270ecaccc908ca246339a9fd528bf7682c02d9e86253642cf2c507857a" Feb 27 17:33:04 crc kubenswrapper[4751]: I0227 17:33:04.522055 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:33:04 crc kubenswrapper[4751]: E0227 17:33:04.523207 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:33:15 crc kubenswrapper[4751]: I0227 17:33:15.520925 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:33:15 crc kubenswrapper[4751]: E0227 17:33:15.522682 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:33:30 crc kubenswrapper[4751]: I0227 17:33:30.521249 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:33:30 crc kubenswrapper[4751]: E0227 17:33:30.522213 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:33:45 crc kubenswrapper[4751]: I0227 17:33:45.521152 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:33:45 crc kubenswrapper[4751]: E0227 17:33:45.522264 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:33:58 crc kubenswrapper[4751]: I0227 17:33:58.531207 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:33:58 crc kubenswrapper[4751]: E0227 17:33:58.532566 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:34:00 crc kubenswrapper[4751]: I0227 17:34:00.168075 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536894-rrfr6"] Feb 27 17:34:00 crc kubenswrapper[4751]: E0227 17:34:00.169866 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac429ae1-d578-4812-8334-d7a7c1cf395a" containerName="oc" Feb 27 17:34:00 crc kubenswrapper[4751]: I0227 17:34:00.169906 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac429ae1-d578-4812-8334-d7a7c1cf395a" containerName="oc" Feb 27 17:34:00 crc kubenswrapper[4751]: I0227 17:34:00.170360 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac429ae1-d578-4812-8334-d7a7c1cf395a" containerName="oc" Feb 27 17:34:00 crc kubenswrapper[4751]: I0227 17:34:00.171471 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536894-rrfr6" Feb 27 17:34:00 crc kubenswrapper[4751]: I0227 17:34:00.175518 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:34:00 crc kubenswrapper[4751]: I0227 17:34:00.175520 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:34:00 crc kubenswrapper[4751]: I0227 17:34:00.176112 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:34:00 crc kubenswrapper[4751]: I0227 17:34:00.193086 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536894-rrfr6"] Feb 27 17:34:00 crc kubenswrapper[4751]: I0227 17:34:00.248916 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jsxdk\" (UniqueName: \"kubernetes.io/projected/4cd0facd-05cb-4d38-bc5e-338bbb9a468c-kube-api-access-jsxdk\") pod \"auto-csr-approver-29536894-rrfr6\" (UID: \"4cd0facd-05cb-4d38-bc5e-338bbb9a468c\") " pod="openshift-infra/auto-csr-approver-29536894-rrfr6" Feb 27 17:34:00 crc kubenswrapper[4751]: I0227 17:34:00.351299 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jsxdk\" (UniqueName: \"kubernetes.io/projected/4cd0facd-05cb-4d38-bc5e-338bbb9a468c-kube-api-access-jsxdk\") pod \"auto-csr-approver-29536894-rrfr6\" (UID: \"4cd0facd-05cb-4d38-bc5e-338bbb9a468c\") " pod="openshift-infra/auto-csr-approver-29536894-rrfr6" Feb 27 17:34:00 crc kubenswrapper[4751]: I0227 17:34:00.392034 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jsxdk\" (UniqueName: 
\"kubernetes.io/projected/4cd0facd-05cb-4d38-bc5e-338bbb9a468c-kube-api-access-jsxdk\") pod \"auto-csr-approver-29536894-rrfr6\" (UID: \"4cd0facd-05cb-4d38-bc5e-338bbb9a468c\") " pod="openshift-infra/auto-csr-approver-29536894-rrfr6" Feb 27 17:34:00 crc kubenswrapper[4751]: I0227 17:34:00.503836 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536894-rrfr6" Feb 27 17:34:00 crc kubenswrapper[4751]: I0227 17:34:00.821393 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536894-rrfr6"] Feb 27 17:34:01 crc kubenswrapper[4751]: I0227 17:34:01.575858 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536894-rrfr6" event={"ID":"4cd0facd-05cb-4d38-bc5e-338bbb9a468c","Type":"ContainerStarted","Data":"adf1e7849471ac6be08b65a8fa7bc240e5ea166ea129bbe258b15c09ff2cd649"} Feb 27 17:34:02 crc kubenswrapper[4751]: I0227 17:34:02.585534 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536894-rrfr6" event={"ID":"4cd0facd-05cb-4d38-bc5e-338bbb9a468c","Type":"ContainerStarted","Data":"35b08e8072477e37f8056c94e293abc90c6fee8fec60e6980c833ac4b8fd73d3"} Feb 27 17:34:02 crc kubenswrapper[4751]: I0227 17:34:02.613566 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-infra/auto-csr-approver-29536894-rrfr6" podStartSLOduration=1.365375394 podStartE2EDuration="2.613538908s" podCreationTimestamp="2026-02-27 17:34:00 +0000 UTC" firstStartedPulling="2026-02-27 17:34:00.848704118 +0000 UTC m=+4202.995718565" lastFinishedPulling="2026-02-27 17:34:02.096867602 +0000 UTC m=+4204.243882079" observedRunningTime="2026-02-27 17:34:02.604507851 +0000 UTC m=+4204.751522328" watchObservedRunningTime="2026-02-27 17:34:02.613538908 +0000 UTC m=+4204.760553385" Feb 27 17:34:03 crc kubenswrapper[4751]: I0227 17:34:03.602114 4751 generic.go:334] "Generic (PLEG): container finished" podID="4cd0facd-05cb-4d38-bc5e-338bbb9a468c" containerID="35b08e8072477e37f8056c94e293abc90c6fee8fec60e6980c833ac4b8fd73d3" exitCode=0 Feb 27 17:34:03 crc kubenswrapper[4751]: I0227 17:34:03.602180 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536894-rrfr6" event={"ID":"4cd0facd-05cb-4d38-bc5e-338bbb9a468c","Type":"ContainerDied","Data":"35b08e8072477e37f8056c94e293abc90c6fee8fec60e6980c833ac4b8fd73d3"} Feb 27 17:34:04 crc kubenswrapper[4751]: I0227 17:34:04.986103 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536894-rrfr6" Feb 27 17:34:05 crc kubenswrapper[4751]: I0227 17:34:05.037861 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jsxdk\" (UniqueName: \"kubernetes.io/projected/4cd0facd-05cb-4d38-bc5e-338bbb9a468c-kube-api-access-jsxdk\") pod \"4cd0facd-05cb-4d38-bc5e-338bbb9a468c\" (UID: \"4cd0facd-05cb-4d38-bc5e-338bbb9a468c\") " Feb 27 17:34:05 crc kubenswrapper[4751]: I0227 17:34:05.044183 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4cd0facd-05cb-4d38-bc5e-338bbb9a468c-kube-api-access-jsxdk" (OuterVolumeSpecName: "kube-api-access-jsxdk") pod "4cd0facd-05cb-4d38-bc5e-338bbb9a468c" (UID: "4cd0facd-05cb-4d38-bc5e-338bbb9a468c"). InnerVolumeSpecName "kube-api-access-jsxdk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:34:05 crc kubenswrapper[4751]: I0227 17:34:05.140003 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jsxdk\" (UniqueName: \"kubernetes.io/projected/4cd0facd-05cb-4d38-bc5e-338bbb9a468c-kube-api-access-jsxdk\") on node \"crc\" DevicePath \"\"" Feb 27 17:34:05 crc kubenswrapper[4751]: I0227 17:34:05.652007 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536894-rrfr6" event={"ID":"4cd0facd-05cb-4d38-bc5e-338bbb9a468c","Type":"ContainerDied","Data":"adf1e7849471ac6be08b65a8fa7bc240e5ea166ea129bbe258b15c09ff2cd649"} Feb 27 17:34:05 crc kubenswrapper[4751]: I0227 17:34:05.652065 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="adf1e7849471ac6be08b65a8fa7bc240e5ea166ea129bbe258b15c09ff2cd649" Feb 27 17:34:05 crc kubenswrapper[4751]: I0227 17:34:05.652144 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536894-rrfr6" Feb 27 17:34:05 crc kubenswrapper[4751]: I0227 17:34:05.717744 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536888-8h576"] Feb 27 17:34:05 crc kubenswrapper[4751]: I0227 17:34:05.730806 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536888-8h576"] Feb 27 17:34:06 crc kubenswrapper[4751]: I0227 17:34:06.535872 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec005fa2-d8c2-4768-864d-afbe9ff57629" path="/var/lib/kubelet/pods/ec005fa2-d8c2-4768-864d-afbe9ff57629/volumes" Feb 27 17:34:10 crc kubenswrapper[4751]: I0227 17:34:10.521659 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:34:10 crc kubenswrapper[4751]: E0227 17:34:10.522478 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:34:24 crc kubenswrapper[4751]: I0227 17:34:24.520786 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:34:24 crc kubenswrapper[4751]: E0227 17:34:24.521546 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:34:38 crc kubenswrapper[4751]: I0227 17:34:38.529826 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:34:38 crc kubenswrapper[4751]: I0227 17:34:38.959600 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerStarted","Data":"8b57134518086cc1b0e253e40a838c2eac9f5ca33579d3a46206b1507590a014"} Feb 27 17:34:51 
crc kubenswrapper[4751]: I0227 17:34:51.284951 4751 scope.go:117] "RemoveContainer" containerID="39f23feeda8f7dc1aae75038c62f8ec70f5ee970a8bb62e009ff8c07a52758c4" Feb 27 17:36:00 crc kubenswrapper[4751]: I0227 17:36:00.143362 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536896-frp6b"] Feb 27 17:36:00 crc kubenswrapper[4751]: E0227 17:36:00.144364 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cd0facd-05cb-4d38-bc5e-338bbb9a468c" containerName="oc" Feb 27 17:36:00 crc kubenswrapper[4751]: I0227 17:36:00.144383 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cd0facd-05cb-4d38-bc5e-338bbb9a468c" containerName="oc" Feb 27 17:36:00 crc kubenswrapper[4751]: I0227 17:36:00.146710 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cd0facd-05cb-4d38-bc5e-338bbb9a468c" containerName="oc" Feb 27 17:36:00 crc kubenswrapper[4751]: I0227 17:36:00.147853 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536896-frp6b" Feb 27 17:36:00 crc kubenswrapper[4751]: I0227 17:36:00.152470 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:36:00 crc kubenswrapper[4751]: I0227 17:36:00.152865 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:36:00 crc kubenswrapper[4751]: I0227 17:36:00.158325 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:36:00 crc kubenswrapper[4751]: I0227 17:36:00.185764 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536896-frp6b"] Feb 27 17:36:00 crc kubenswrapper[4751]: I0227 17:36:00.314882 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmz2c\" (UniqueName: \"kubernetes.io/projected/40fa2fd2-6adb-4544-bbd2-8e0465e66a84-kube-api-access-xmz2c\") pod \"auto-csr-approver-29536896-frp6b\" (UID: \"40fa2fd2-6adb-4544-bbd2-8e0465e66a84\") " pod="openshift-infra/auto-csr-approver-29536896-frp6b" Feb 27 17:36:00 crc kubenswrapper[4751]: I0227 17:36:00.418017 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmz2c\" (UniqueName: \"kubernetes.io/projected/40fa2fd2-6adb-4544-bbd2-8e0465e66a84-kube-api-access-xmz2c\") pod \"auto-csr-approver-29536896-frp6b\" (UID: \"40fa2fd2-6adb-4544-bbd2-8e0465e66a84\") " pod="openshift-infra/auto-csr-approver-29536896-frp6b" Feb 27 17:36:00 crc kubenswrapper[4751]: I0227 17:36:00.449913 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmz2c\" (UniqueName: \"kubernetes.io/projected/40fa2fd2-6adb-4544-bbd2-8e0465e66a84-kube-api-access-xmz2c\") pod \"auto-csr-approver-29536896-frp6b\" (UID: \"40fa2fd2-6adb-4544-bbd2-8e0465e66a84\") " pod="openshift-infra/auto-csr-approver-29536896-frp6b" Feb 27 17:36:00 crc kubenswrapper[4751]: I0227 17:36:00.471386 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536896-frp6b" Feb 27 17:36:00 crc kubenswrapper[4751]: I0227 17:36:00.757539 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536896-frp6b"] Feb 27 17:36:00 crc kubenswrapper[4751]: I0227 17:36:00.777110 4751 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 27 17:36:01 crc kubenswrapper[4751]: I0227 17:36:01.654599 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536896-frp6b" event={"ID":"40fa2fd2-6adb-4544-bbd2-8e0465e66a84","Type":"ContainerStarted","Data":"4a48e96643171c81d9c176a68c4967dd9c1c8eff56b8b099f32c87f04b0f9b55"} Feb 27 17:36:02 crc kubenswrapper[4751]: I0227 17:36:02.665418 4751 generic.go:334] "Generic (PLEG): container finished" podID="40fa2fd2-6adb-4544-bbd2-8e0465e66a84" containerID="83003d3384bc71e5269380797723b94dc77b2303b947ba578fd2a8af0a426688" exitCode=0 Feb 27 17:36:02 crc kubenswrapper[4751]: I0227 17:36:02.665680 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536896-frp6b" event={"ID":"40fa2fd2-6adb-4544-bbd2-8e0465e66a84","Type":"ContainerDied","Data":"83003d3384bc71e5269380797723b94dc77b2303b947ba578fd2a8af0a426688"} Feb 27 17:36:04 crc kubenswrapper[4751]: I0227 17:36:04.111199 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536896-frp6b" Feb 27 17:36:04 crc kubenswrapper[4751]: I0227 17:36:04.275463 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xmz2c\" (UniqueName: \"kubernetes.io/projected/40fa2fd2-6adb-4544-bbd2-8e0465e66a84-kube-api-access-xmz2c\") pod \"40fa2fd2-6adb-4544-bbd2-8e0465e66a84\" (UID: \"40fa2fd2-6adb-4544-bbd2-8e0465e66a84\") " Feb 27 17:36:04 crc kubenswrapper[4751]: I0227 17:36:04.506700 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40fa2fd2-6adb-4544-bbd2-8e0465e66a84-kube-api-access-xmz2c" (OuterVolumeSpecName: "kube-api-access-xmz2c") pod "40fa2fd2-6adb-4544-bbd2-8e0465e66a84" (UID: "40fa2fd2-6adb-4544-bbd2-8e0465e66a84"). InnerVolumeSpecName "kube-api-access-xmz2c". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:36:04 crc kubenswrapper[4751]: I0227 17:36:04.580755 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xmz2c\" (UniqueName: \"kubernetes.io/projected/40fa2fd2-6adb-4544-bbd2-8e0465e66a84-kube-api-access-xmz2c\") on node \"crc\" DevicePath \"\"" Feb 27 17:36:04 crc kubenswrapper[4751]: I0227 17:36:04.688594 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536896-frp6b" event={"ID":"40fa2fd2-6adb-4544-bbd2-8e0465e66a84","Type":"ContainerDied","Data":"4a48e96643171c81d9c176a68c4967dd9c1c8eff56b8b099f32c87f04b0f9b55"} Feb 27 17:36:04 crc kubenswrapper[4751]: I0227 17:36:04.688656 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4a48e96643171c81d9c176a68c4967dd9c1c8eff56b8b099f32c87f04b0f9b55" Feb 27 17:36:04 crc kubenswrapper[4751]: I0227 17:36:04.689148 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536896-frp6b" Feb 27 17:36:05 crc kubenswrapper[4751]: I0227 17:36:05.209179 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536890-2cgdd"] Feb 27 17:36:05 crc kubenswrapper[4751]: I0227 17:36:05.222868 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536890-2cgdd"] Feb 27 17:36:06 crc kubenswrapper[4751]: I0227 17:36:06.539842 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="242d2685-aabe-4c87-b911-8590dd4b9333" path="/var/lib/kubelet/pods/242d2685-aabe-4c87-b911-8590dd4b9333/volumes" Feb 27 17:36:09 crc kubenswrapper[4751]: I0227 17:36:09.688284 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-zzgjk"] Feb 27 17:36:09 crc kubenswrapper[4751]: E0227 17:36:09.689151 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40fa2fd2-6adb-4544-bbd2-8e0465e66a84" containerName="oc" Feb 27 17:36:09 crc kubenswrapper[4751]: I0227 17:36:09.689166 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="40fa2fd2-6adb-4544-bbd2-8e0465e66a84" containerName="oc" Feb 27 17:36:09 crc kubenswrapper[4751]: I0227 17:36:09.689380 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="40fa2fd2-6adb-4544-bbd2-8e0465e66a84" containerName="oc" Feb 27 17:36:09 crc kubenswrapper[4751]: I0227 17:36:09.690511 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zzgjk" Feb 27 17:36:09 crc kubenswrapper[4751]: I0227 17:36:09.714465 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zzgjk"] Feb 27 17:36:09 crc kubenswrapper[4751]: I0227 17:36:09.779157 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jzq89\" (UniqueName: \"kubernetes.io/projected/93f0d80c-69f7-4186-9c66-d5fe7b762348-kube-api-access-jzq89\") pod \"community-operators-zzgjk\" (UID: \"93f0d80c-69f7-4186-9c66-d5fe7b762348\") " pod="openshift-marketplace/community-operators-zzgjk" Feb 27 17:36:09 crc kubenswrapper[4751]: I0227 17:36:09.779300 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93f0d80c-69f7-4186-9c66-d5fe7b762348-utilities\") pod \"community-operators-zzgjk\" (UID: \"93f0d80c-69f7-4186-9c66-d5fe7b762348\") " pod="openshift-marketplace/community-operators-zzgjk" Feb 27 17:36:09 crc kubenswrapper[4751]: I0227 17:36:09.779339 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93f0d80c-69f7-4186-9c66-d5fe7b762348-catalog-content\") pod \"community-operators-zzgjk\" (UID: \"93f0d80c-69f7-4186-9c66-d5fe7b762348\") " pod="openshift-marketplace/community-operators-zzgjk" Feb 27 17:36:09 crc kubenswrapper[4751]: I0227 17:36:09.881094 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93f0d80c-69f7-4186-9c66-d5fe7b762348-utilities\") pod \"community-operators-zzgjk\" (UID: \"93f0d80c-69f7-4186-9c66-d5fe7b762348\") " pod="openshift-marketplace/community-operators-zzgjk" Feb 27 17:36:09 crc kubenswrapper[4751]: I0227 17:36:09.881169 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93f0d80c-69f7-4186-9c66-d5fe7b762348-catalog-content\") pod \"community-operators-zzgjk\" (UID: \"93f0d80c-69f7-4186-9c66-d5fe7b762348\") " pod="openshift-marketplace/community-operators-zzgjk" Feb 27 17:36:09 crc kubenswrapper[4751]: I0227 17:36:09.881237 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jzq89\" (UniqueName: \"kubernetes.io/projected/93f0d80c-69f7-4186-9c66-d5fe7b762348-kube-api-access-jzq89\") pod \"community-operators-zzgjk\" (UID: \"93f0d80c-69f7-4186-9c66-d5fe7b762348\") " pod="openshift-marketplace/community-operators-zzgjk" Feb 27 17:36:09 crc kubenswrapper[4751]: I0227 17:36:09.881774 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93f0d80c-69f7-4186-9c66-d5fe7b762348-utilities\") pod \"community-operators-zzgjk\" (UID: \"93f0d80c-69f7-4186-9c66-d5fe7b762348\") " pod="openshift-marketplace/community-operators-zzgjk" Feb 27 17:36:09 crc kubenswrapper[4751]: I0227 17:36:09.881917 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93f0d80c-69f7-4186-9c66-d5fe7b762348-catalog-content\") pod \"community-operators-zzgjk\" (UID: \"93f0d80c-69f7-4186-9c66-d5fe7b762348\") " pod="openshift-marketplace/community-operators-zzgjk" Feb 27 17:36:09 crc kubenswrapper[4751]: I0227 17:36:09.905465 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jzq89\" (UniqueName: \"kubernetes.io/projected/93f0d80c-69f7-4186-9c66-d5fe7b762348-kube-api-access-jzq89\") pod \"community-operators-zzgjk\" (UID: \"93f0d80c-69f7-4186-9c66-d5fe7b762348\") " pod="openshift-marketplace/community-operators-zzgjk" Feb 27 17:36:10 crc kubenswrapper[4751]: I0227 17:36:10.054198 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zzgjk" Feb 27 17:36:10 crc kubenswrapper[4751]: I0227 17:36:10.576187 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zzgjk"] Feb 27 17:36:10 crc kubenswrapper[4751]: I0227 17:36:10.682527 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-9n8d6"] Feb 27 17:36:10 crc kubenswrapper[4751]: I0227 17:36:10.684592 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9n8d6" Feb 27 17:36:10 crc kubenswrapper[4751]: I0227 17:36:10.696623 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9n8d6"] Feb 27 17:36:10 crc kubenswrapper[4751]: I0227 17:36:10.777430 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zzgjk" event={"ID":"93f0d80c-69f7-4186-9c66-d5fe7b762348","Type":"ContainerStarted","Data":"0db8b19818b39e9b8178953dac499f58764a4457bff88a12db29d770b1740858"} Feb 27 17:36:10 crc kubenswrapper[4751]: I0227 17:36:10.777471 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zzgjk" event={"ID":"93f0d80c-69f7-4186-9c66-d5fe7b762348","Type":"ContainerStarted","Data":"8df195665bbc0f7a940e6f6a0870d1687789eb742a1383fdac0244f58c185f80"} Feb 27 17:36:10 crc kubenswrapper[4751]: I0227 17:36:10.796838 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300-utilities\") pod \"redhat-marketplace-9n8d6\" (UID: \"042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300\") " pod="openshift-marketplace/redhat-marketplace-9n8d6" Feb 27 17:36:10 crc kubenswrapper[4751]: I0227 17:36:10.796883 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdm7w\" (UniqueName: \"kubernetes.io/projected/042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300-kube-api-access-hdm7w\") pod \"redhat-marketplace-9n8d6\" (UID: \"042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300\") " pod="openshift-marketplace/redhat-marketplace-9n8d6" Feb 27 17:36:10 crc kubenswrapper[4751]: I0227 17:36:10.796985 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300-catalog-content\") pod \"redhat-marketplace-9n8d6\" (UID: \"042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300\") " pod="openshift-marketplace/redhat-marketplace-9n8d6" Feb 27 17:36:10 crc kubenswrapper[4751]: I0227 17:36:10.898206 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300-catalog-content\") pod \"redhat-marketplace-9n8d6\" (UID: \"042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300\") " pod="openshift-marketplace/redhat-marketplace-9n8d6" Feb 27 17:36:10 crc kubenswrapper[4751]: I0227 17:36:10.898269 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300-utilities\") pod \"redhat-marketplace-9n8d6\" (UID: \"042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300\") " pod="openshift-marketplace/redhat-marketplace-9n8d6" Feb 27 17:36:10 crc kubenswrapper[4751]: I0227 17:36:10.898289 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdm7w\" (UniqueName: \"kubernetes.io/projected/042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300-kube-api-access-hdm7w\") pod \"redhat-marketplace-9n8d6\" (UID: \"042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300\") " pod="openshift-marketplace/redhat-marketplace-9n8d6" Feb 27 17:36:10 crc kubenswrapper[4751]: I0227 17:36:10.898815 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300-utilities\") pod \"redhat-marketplace-9n8d6\" (UID: \"042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300\") " pod="openshift-marketplace/redhat-marketplace-9n8d6" Feb 27 17:36:10 crc kubenswrapper[4751]: I0227 17:36:10.898818 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300-catalog-content\") pod \"redhat-marketplace-9n8d6\" (UID: \"042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300\") " pod="openshift-marketplace/redhat-marketplace-9n8d6" Feb 27 17:36:10 crc kubenswrapper[4751]: I0227 17:36:10.926734 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdm7w\" (UniqueName: \"kubernetes.io/projected/042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300-kube-api-access-hdm7w\") pod \"redhat-marketplace-9n8d6\" (UID: \"042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300\") " pod="openshift-marketplace/redhat-marketplace-9n8d6" Feb 27 17:36:11 crc kubenswrapper[4751]: I0227 17:36:11.010840 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9n8d6" Feb 27 17:36:11 crc kubenswrapper[4751]: I0227 17:36:11.456494 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9n8d6"] Feb 27 17:36:11 crc kubenswrapper[4751]: I0227 17:36:11.784377 4751 generic.go:334] "Generic (PLEG): container finished" podID="93f0d80c-69f7-4186-9c66-d5fe7b762348" containerID="0db8b19818b39e9b8178953dac499f58764a4457bff88a12db29d770b1740858" exitCode=0 Feb 27 17:36:11 crc kubenswrapper[4751]: I0227 17:36:11.784442 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zzgjk" event={"ID":"93f0d80c-69f7-4186-9c66-d5fe7b762348","Type":"ContainerDied","Data":"0db8b19818b39e9b8178953dac499f58764a4457bff88a12db29d770b1740858"} Feb 27 17:36:11 crc kubenswrapper[4751]: I0227 17:36:11.785728 4751 generic.go:334] "Generic (PLEG): container finished" podID="042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300" containerID="61fa5515e9700aee708aa154786ec91248d521224dbe3903ad56ee71500e1dbb" exitCode=0 Feb 27 17:36:11 crc kubenswrapper[4751]: I0227 17:36:11.785755 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9n8d6" event={"ID":"042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300","Type":"ContainerDied","Data":"61fa5515e9700aee708aa154786ec91248d521224dbe3903ad56ee71500e1dbb"} Feb 27 17:36:11 crc kubenswrapper[4751]: I0227 17:36:11.785782 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9n8d6" event={"ID":"042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300","Type":"ContainerStarted","Data":"d82bd416d04e6f34c5029538ceb3ece7c2960a6ebb0a09cb42645e58f22e7ec9"} Feb 27 17:36:13 crc kubenswrapper[4751]: I0227 17:36:13.806112 4751 generic.go:334] "Generic (PLEG): container finished" podID="042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300" containerID="a3fd5e410a70d1770053462d0bb2d6bf909ae85d0578c7138a948f8de3307736" exitCode=0 Feb 27 17:36:13 crc kubenswrapper[4751]: I0227 17:36:13.806228 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9n8d6" event={"ID":"042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300","Type":"ContainerDied","Data":"a3fd5e410a70d1770053462d0bb2d6bf909ae85d0578c7138a948f8de3307736"} Feb 27 17:36:14 crc kubenswrapper[4751]: I0227 17:36:14.819175 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-9n8d6" event={"ID":"042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300","Type":"ContainerStarted","Data":"ae562e99e492731fb6068715dc2a9108714638eb74e8bb55afce1ce13eca0ca3"} Feb 27 17:36:14 crc kubenswrapper[4751]: I0227 17:36:14.852485 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-9n8d6" podStartSLOduration=2.202942868 podStartE2EDuration="4.852464941s" podCreationTimestamp="2026-02-27 17:36:10 +0000 UTC" firstStartedPulling="2026-02-27 17:36:11.788012586 +0000 UTC m=+4333.935027033" lastFinishedPulling="2026-02-27 17:36:14.437534629 +0000 UTC m=+4336.584549106" observedRunningTime="2026-02-27 17:36:14.843878967 +0000 UTC m=+4336.990893424" watchObservedRunningTime="2026-02-27 17:36:14.852464941 +0000 UTC m=+4336.999479398" Feb 27 17:36:21 crc kubenswrapper[4751]: I0227 17:36:21.011016 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-9n8d6" Feb 27 17:36:21 crc kubenswrapper[4751]: I0227 17:36:21.012040 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-9n8d6" Feb 27 17:36:21 crc kubenswrapper[4751]: I0227 17:36:21.088217 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-9n8d6" Feb 27 17:36:21 crc kubenswrapper[4751]: I0227 17:36:21.943475 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-9n8d6" Feb 27 17:36:25 crc kubenswrapper[4751]: I0227 17:36:25.076011 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9n8d6"] Feb 27 17:36:25 crc kubenswrapper[4751]: I0227 17:36:25.076793 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-9n8d6" podUID="042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300" containerName="registry-server" containerID="cri-o://ae562e99e492731fb6068715dc2a9108714638eb74e8bb55afce1ce13eca0ca3" gracePeriod=2 Feb 27 17:36:25 crc kubenswrapper[4751]: I0227 17:36:25.539224 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9n8d6" Feb 27 17:36:25 crc kubenswrapper[4751]: I0227 17:36:25.648836 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hdm7w\" (UniqueName: \"kubernetes.io/projected/042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300-kube-api-access-hdm7w\") pod \"042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300\" (UID: \"042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300\") " Feb 27 17:36:25 crc kubenswrapper[4751]: I0227 17:36:25.649216 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300-utilities\") pod \"042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300\" (UID: \"042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300\") " Feb 27 17:36:25 crc kubenswrapper[4751]: I0227 17:36:25.649301 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300-catalog-content\") pod \"042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300\" (UID: \"042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300\") " Feb 27 17:36:25 crc kubenswrapper[4751]: I0227 17:36:25.650774 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300-utilities" (OuterVolumeSpecName: "utilities") pod "042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300" (UID: "042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:36:25 crc kubenswrapper[4751]: I0227 17:36:25.662669 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300-kube-api-access-hdm7w" (OuterVolumeSpecName: "kube-api-access-hdm7w") pod "042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300" (UID: "042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300"). InnerVolumeSpecName "kube-api-access-hdm7w". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:36:25 crc kubenswrapper[4751]: I0227 17:36:25.699702 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300" (UID: "042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:36:25 crc kubenswrapper[4751]: I0227 17:36:25.751075 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hdm7w\" (UniqueName: \"kubernetes.io/projected/042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300-kube-api-access-hdm7w\") on node \"crc\" DevicePath \"\"" Feb 27 17:36:25 crc kubenswrapper[4751]: I0227 17:36:25.751131 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 17:36:25 crc kubenswrapper[4751]: I0227 17:36:25.751149 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 17:36:25 crc kubenswrapper[4751]: I0227 17:36:25.927277 4751 generic.go:334] "Generic (PLEG): container finished" podID="042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300" containerID="ae562e99e492731fb6068715dc2a9108714638eb74e8bb55afce1ce13eca0ca3" exitCode=0 Feb 27 17:36:25 crc kubenswrapper[4751]: I0227 17:36:25.927344 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9n8d6" event={"ID":"042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300","Type":"ContainerDied","Data":"ae562e99e492731fb6068715dc2a9108714638eb74e8bb55afce1ce13eca0ca3"} Feb 27 17:36:25 crc kubenswrapper[4751]: I0227 17:36:25.927358 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9n8d6" Feb 27 17:36:25 crc kubenswrapper[4751]: I0227 17:36:25.927392 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9n8d6" event={"ID":"042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300","Type":"ContainerDied","Data":"d82bd416d04e6f34c5029538ceb3ece7c2960a6ebb0a09cb42645e58f22e7ec9"} Feb 27 17:36:25 crc kubenswrapper[4751]: I0227 17:36:25.927466 4751 scope.go:117] "RemoveContainer" containerID="ae562e99e492731fb6068715dc2a9108714638eb74e8bb55afce1ce13eca0ca3" Feb 27 17:36:25 crc kubenswrapper[4751]: I0227 17:36:25.958781 4751 scope.go:117] "RemoveContainer" containerID="a3fd5e410a70d1770053462d0bb2d6bf909ae85d0578c7138a948f8de3307736" Feb 27 17:36:26 crc kubenswrapper[4751]: I0227 17:36:25.991507 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9n8d6"] Feb 27 17:36:26 crc kubenswrapper[4751]: I0227 17:36:26.015179 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-9n8d6"] Feb 27 17:36:26 crc kubenswrapper[4751]: I0227 17:36:26.016105 4751 scope.go:117] "RemoveContainer" containerID="61fa5515e9700aee708aa154786ec91248d521224dbe3903ad56ee71500e1dbb" Feb 27 17:36:26 crc kubenswrapper[4751]: I0227 17:36:26.043858 4751 scope.go:117] "RemoveContainer" containerID="ae562e99e492731fb6068715dc2a9108714638eb74e8bb55afce1ce13eca0ca3" Feb 27 17:36:26 crc kubenswrapper[4751]: E0227 17:36:26.044706 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae562e99e492731fb6068715dc2a9108714638eb74e8bb55afce1ce13eca0ca3\": container with ID starting with ae562e99e492731fb6068715dc2a9108714638eb74e8bb55afce1ce13eca0ca3 not found: ID does not exist" containerID="ae562e99e492731fb6068715dc2a9108714638eb74e8bb55afce1ce13eca0ca3" Feb 27 17:36:26 crc kubenswrapper[4751]: I0227 17:36:26.044773 4751 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae562e99e492731fb6068715dc2a9108714638eb74e8bb55afce1ce13eca0ca3"} err="failed to get container status \"ae562e99e492731fb6068715dc2a9108714638eb74e8bb55afce1ce13eca0ca3\": rpc error: code = NotFound desc = could not find container \"ae562e99e492731fb6068715dc2a9108714638eb74e8bb55afce1ce13eca0ca3\": container with ID starting with ae562e99e492731fb6068715dc2a9108714638eb74e8bb55afce1ce13eca0ca3 not found: ID does not exist" Feb 27 17:36:26 crc kubenswrapper[4751]: I0227 17:36:26.044814 4751 scope.go:117] "RemoveContainer" containerID="a3fd5e410a70d1770053462d0bb2d6bf909ae85d0578c7138a948f8de3307736" Feb 27 17:36:26 crc kubenswrapper[4751]: E0227 17:36:26.045274 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3fd5e410a70d1770053462d0bb2d6bf909ae85d0578c7138a948f8de3307736\": container with ID starting with a3fd5e410a70d1770053462d0bb2d6bf909ae85d0578c7138a948f8de3307736 not found: ID does not exist" containerID="a3fd5e410a70d1770053462d0bb2d6bf909ae85d0578c7138a948f8de3307736" Feb 27 17:36:26 crc kubenswrapper[4751]: I0227 17:36:26.045464 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3fd5e410a70d1770053462d0bb2d6bf909ae85d0578c7138a948f8de3307736"} err="failed to get container status \"a3fd5e410a70d1770053462d0bb2d6bf909ae85d0578c7138a948f8de3307736\": rpc error: code = NotFound desc = could not find container \"a3fd5e410a70d1770053462d0bb2d6bf909ae85d0578c7138a948f8de3307736\": container with ID starting with a3fd5e410a70d1770053462d0bb2d6bf909ae85d0578c7138a948f8de3307736 not found: ID does not exist" Feb 27 17:36:26 crc kubenswrapper[4751]: I0227 17:36:26.045623 4751 scope.go:117] "RemoveContainer" containerID="61fa5515e9700aee708aa154786ec91248d521224dbe3903ad56ee71500e1dbb" Feb 27 17:36:26 crc kubenswrapper[4751]: E0227 17:36:26.046181 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61fa5515e9700aee708aa154786ec91248d521224dbe3903ad56ee71500e1dbb\": container with ID starting with 61fa5515e9700aee708aa154786ec91248d521224dbe3903ad56ee71500e1dbb not found: ID does not exist" containerID="61fa5515e9700aee708aa154786ec91248d521224dbe3903ad56ee71500e1dbb" Feb 27 17:36:26 crc kubenswrapper[4751]: I0227 17:36:26.046249 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61fa5515e9700aee708aa154786ec91248d521224dbe3903ad56ee71500e1dbb"} err="failed to get container status \"61fa5515e9700aee708aa154786ec91248d521224dbe3903ad56ee71500e1dbb\": rpc error: code = NotFound desc = could not find container \"61fa5515e9700aee708aa154786ec91248d521224dbe3903ad56ee71500e1dbb\": container with ID starting with 61fa5515e9700aee708aa154786ec91248d521224dbe3903ad56ee71500e1dbb not found: ID does not exist" Feb 27 17:36:26 crc kubenswrapper[4751]: I0227 17:36:26.540712 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300" path="/var/lib/kubelet/pods/042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300/volumes" Feb 27 17:36:51 crc kubenswrapper[4751]: I0227 17:36:51.394501 4751 scope.go:117] "RemoveContainer" containerID="b7ae1b30a8cd1b186961247c15923d3acaeadc5d4f8ad67e9db921ab9893636f" Feb 27 17:36:58 crc kubenswrapper[4751]: I0227 17:36:58.918551 4751 patch_prober.go:28] interesting 
pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:36:58 crc kubenswrapper[4751]: I0227 17:36:58.919222 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:37:13 crc kubenswrapper[4751]: E0227 17:37:13.243588 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/community-operator-index@sha256=886ecdbcb5b8f90338063f6476072fab73c2a9a65b9f2b3835b7bd01c69794c1/signature-2: status 500 (Internal Server Error)" image="registry.redhat.io/redhat/community-operator-index:v4.18" Feb 27 17:37:13 crc kubenswrapper[4751]: E0227 17:37:13.245879 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jzq89,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-zzgjk_openshift-marketplace(93f0d80c-69f7-4186-9c66-d5fe7b762348): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/community-operator-index@sha256=886ecdbcb5b8f90338063f6476072fab73c2a9a65b9f2b3835b7bd01c69794c1/signature-2: status 500 (Internal Server Error)" logger="UnhandledError" Feb 27 17:37:13 crc kubenswrapper[4751]: E0227 17:37:13.247246 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from 
https://registry.redhat.io/containers/sigstore/redhat/community-operator-index@sha256=886ecdbcb5b8f90338063f6476072fab73c2a9a65b9f2b3835b7bd01c69794c1/signature-2: status 500 (Internal Server Error)\"" pod="openshift-marketplace/community-operators-zzgjk" podUID="93f0d80c-69f7-4186-9c66-d5fe7b762348" Feb 27 17:37:13 crc kubenswrapper[4751]: E0227 17:37:13.402531 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-zzgjk" podUID="93f0d80c-69f7-4186-9c66-d5fe7b762348" Feb 27 17:37:26 crc kubenswrapper[4751]: E0227 17:37:26.203900 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/community-operator-index@sha256=886ecdbcb5b8f90338063f6476072fab73c2a9a65b9f2b3835b7bd01c69794c1/signature-2: status 500 (Internal Server Error)" image="registry.redhat.io/redhat/community-operator-index:v4.18" Feb 27 17:37:26 crc kubenswrapper[4751]: E0227 17:37:26.205130 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jzq89,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-zzgjk_openshift-marketplace(93f0d80c-69f7-4186-9c66-d5fe7b762348): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/community-operator-index@sha256=886ecdbcb5b8f90338063f6476072fab73c2a9a65b9f2b3835b7bd01c69794c1/signature-2: status 500 (Internal Server Error)" logger="UnhandledError" Feb 27 17:37:26 crc kubenswrapper[4751]: E0227 17:37:26.206579 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"copying system image from manifest list: reading signatures: 
reading signature from https://registry.redhat.io/containers/sigstore/redhat/community-operator-index@sha256=886ecdbcb5b8f90338063f6476072fab73c2a9a65b9f2b3835b7bd01c69794c1/signature-2: status 500 (Internal Server Error)\"" pod="openshift-marketplace/community-operators-zzgjk" podUID="93f0d80c-69f7-4186-9c66-d5fe7b762348" Feb 27 17:37:28 crc kubenswrapper[4751]: I0227 17:37:28.918509 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:37:28 crc kubenswrapper[4751]: I0227 17:37:28.918875 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:37:39 crc kubenswrapper[4751]: E0227 17:37:39.524510 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-zzgjk" podUID="93f0d80c-69f7-4186-9c66-d5fe7b762348" Feb 27 17:37:51 crc kubenswrapper[4751]: E0227 17:37:51.295891 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/community-operator-index@sha256=886ecdbcb5b8f90338063f6476072fab73c2a9a65b9f2b3835b7bd01c69794c1/signature-2: status 500 (Internal Server Error)" image="registry.redhat.io/redhat/community-operator-index:v4.18" Feb 27 17:37:51 crc kubenswrapper[4751]: E0227 17:37:51.296740 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jzq89,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-zzgjk_openshift-marketplace(93f0d80c-69f7-4186-9c66-d5fe7b762348): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/community-operator-index@sha256=886ecdbcb5b8f90338063f6476072fab73c2a9a65b9f2b3835b7bd01c69794c1/signature-2: status 500 (Internal Server Error)" logger="UnhandledError" Feb 27 17:37:51 crc kubenswrapper[4751]: E0227 17:37:51.298058 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/community-operator-index@sha256=886ecdbcb5b8f90338063f6476072fab73c2a9a65b9f2b3835b7bd01c69794c1/signature-2: status 500 (Internal Server Error)\"" pod="openshift-marketplace/community-operators-zzgjk" podUID="93f0d80c-69f7-4186-9c66-d5fe7b762348" Feb 27 17:37:58 crc kubenswrapper[4751]: I0227 17:37:58.918948 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:37:58 crc kubenswrapper[4751]: I0227 17:37:58.919482 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:37:58 crc kubenswrapper[4751]: I0227 17:37:58.919554 4751 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 17:37:58 crc kubenswrapper[4751]: I0227 17:37:58.920514 4751 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8b57134518086cc1b0e253e40a838c2eac9f5ca33579d3a46206b1507590a014"} 
pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 27 17:37:58 crc kubenswrapper[4751]: I0227 17:37:58.920615 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" containerID="cri-o://8b57134518086cc1b0e253e40a838c2eac9f5ca33579d3a46206b1507590a014" gracePeriod=600 Feb 27 17:37:59 crc kubenswrapper[4751]: I0227 17:37:59.845557 4751 generic.go:334] "Generic (PLEG): container finished" podID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerID="8b57134518086cc1b0e253e40a838c2eac9f5ca33579d3a46206b1507590a014" exitCode=0 Feb 27 17:37:59 crc kubenswrapper[4751]: I0227 17:37:59.845604 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerDied","Data":"8b57134518086cc1b0e253e40a838c2eac9f5ca33579d3a46206b1507590a014"} Feb 27 17:37:59 crc kubenswrapper[4751]: I0227 17:37:59.846067 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerStarted","Data":"5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6"} Feb 27 17:37:59 crc kubenswrapper[4751]: I0227 17:37:59.846102 4751 scope.go:117] "RemoveContainer" containerID="d412b23ed67a7a1e7cfec0a92dc9bc0b8f9e45f3a0c4b120eff4832c6684851f" Feb 27 17:38:00 crc kubenswrapper[4751]: I0227 17:38:00.161582 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536898-x7x5b"] Feb 27 17:38:00 crc kubenswrapper[4751]: E0227 17:38:00.162511 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300" containerName="extract-utilities" Feb 27 17:38:00 crc kubenswrapper[4751]: I0227 17:38:00.162536 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300" containerName="extract-utilities" Feb 27 17:38:00 crc kubenswrapper[4751]: E0227 17:38:00.162563 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300" containerName="extract-content" Feb 27 17:38:00 crc kubenswrapper[4751]: I0227 17:38:00.162576 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300" containerName="extract-content" Feb 27 17:38:00 crc kubenswrapper[4751]: E0227 17:38:00.162615 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300" containerName="registry-server" Feb 27 17:38:00 crc kubenswrapper[4751]: I0227 17:38:00.162628 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300" containerName="registry-server" Feb 27 17:38:00 crc kubenswrapper[4751]: I0227 17:38:00.162916 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="042b4a96-d82e-4ae6-a9b9-4c1e2f3fe300" containerName="registry-server" Feb 27 17:38:00 crc kubenswrapper[4751]: I0227 17:38:00.163702 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" Feb 27 17:38:00 crc kubenswrapper[4751]: I0227 17:38:00.167208 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:38:00 crc kubenswrapper[4751]: I0227 17:38:00.167678 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:38:00 crc kubenswrapper[4751]: I0227 17:38:00.168032 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:38:00 crc kubenswrapper[4751]: I0227 17:38:00.176061 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536898-x7x5b"] Feb 27 17:38:00 crc kubenswrapper[4751]: I0227 17:38:00.264664 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlxqx\" (UniqueName: \"kubernetes.io/projected/bebb76fd-da0e-4c76-a3fe-1c31a40256fc-kube-api-access-jlxqx\") pod \"auto-csr-approver-29536898-x7x5b\" (UID: \"bebb76fd-da0e-4c76-a3fe-1c31a40256fc\") " pod="openshift-infra/auto-csr-approver-29536898-x7x5b" Feb 27 17:38:00 crc kubenswrapper[4751]: I0227 17:38:00.366243 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlxqx\" (UniqueName: \"kubernetes.io/projected/bebb76fd-da0e-4c76-a3fe-1c31a40256fc-kube-api-access-jlxqx\") pod \"auto-csr-approver-29536898-x7x5b\" (UID: \"bebb76fd-da0e-4c76-a3fe-1c31a40256fc\") " pod="openshift-infra/auto-csr-approver-29536898-x7x5b" Feb 27 17:38:00 crc kubenswrapper[4751]: I0227 17:38:00.400351 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlxqx\" (UniqueName: \"kubernetes.io/projected/bebb76fd-da0e-4c76-a3fe-1c31a40256fc-kube-api-access-jlxqx\") pod \"auto-csr-approver-29536898-x7x5b\" (UID: \"bebb76fd-da0e-4c76-a3fe-1c31a40256fc\") " pod="openshift-infra/auto-csr-approver-29536898-x7x5b" Feb 27 17:38:00 crc kubenswrapper[4751]: I0227 17:38:00.503746 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" Feb 27 17:38:01 crc kubenswrapper[4751]: I0227 17:38:01.051988 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536898-x7x5b"] Feb 27 17:38:01 crc kubenswrapper[4751]: W0227 17:38:01.059217 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbebb76fd_da0e_4c76_a3fe_1c31a40256fc.slice/crio-fe3bf064df12fd8544e528feafa8a344b80ecced8dccc568d73e2b85f03fa772 WatchSource:0}: Error finding container fe3bf064df12fd8544e528feafa8a344b80ecced8dccc568d73e2b85f03fa772: Status 404 returned error can't find the container with id fe3bf064df12fd8544e528feafa8a344b80ecced8dccc568d73e2b85f03fa772 Feb 27 17:38:01 crc kubenswrapper[4751]: I0227 17:38:01.866110 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" event={"ID":"bebb76fd-da0e-4c76-a3fe-1c31a40256fc","Type":"ContainerStarted","Data":"fe3bf064df12fd8544e528feafa8a344b80ecced8dccc568d73e2b85f03fa772"} Feb 27 17:38:02 crc kubenswrapper[4751]: E0227 17:38:02.151978 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)" image="registry.redhat.io/openshift4/ose-cli:latest" Feb 27 17:38:02 crc kubenswrapper[4751]: E0227 17:38:02.152227 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 17:38:02 crc kubenswrapper[4751]: container &Container{Name:oc,Image:registry.redhat.io/openshift4/ose-cli:latest,Command:[/bin/bash -c oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve Feb 27 17:38:02 crc kubenswrapper[4751]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jlxqx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod auto-csr-approver-29536898-x7x5b_openshift-infra(bebb76fd-da0e-4c76-a3fe-1c31a40256fc): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error) Feb 27 17:38:02 crc kubenswrapper[4751]: > logger="UnhandledError" Feb 27 17:38:02 crc kubenswrapper[4751]: E0227 17:38:02.153528 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from 
https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:38:02 crc kubenswrapper[4751]: E0227 17:38:02.898596 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:38:05 crc kubenswrapper[4751]: E0227 17:38:05.524242 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-zzgjk" podUID="93f0d80c-69f7-4186-9c66-d5fe7b762348" Feb 27 17:38:16 crc kubenswrapper[4751]: E0227 17:38:16.524781 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-zzgjk" podUID="93f0d80c-69f7-4186-9c66-d5fe7b762348" Feb 27 17:38:29 crc kubenswrapper[4751]: E0227 17:38:29.524922 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-zzgjk" podUID="93f0d80c-69f7-4186-9c66-d5fe7b762348" Feb 27 17:38:43 crc kubenswrapper[4751]: I0227 17:38:43.299948 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zzgjk" event={"ID":"93f0d80c-69f7-4186-9c66-d5fe7b762348","Type":"ContainerStarted","Data":"77f5770afe633df52b694fa0b0f0f01ed909e4c63576545724ce565e18bb6324"} Feb 27 17:38:44 crc kubenswrapper[4751]: I0227 17:38:44.314885 4751 generic.go:334] "Generic (PLEG): container finished" podID="93f0d80c-69f7-4186-9c66-d5fe7b762348" containerID="77f5770afe633df52b694fa0b0f0f01ed909e4c63576545724ce565e18bb6324" exitCode=0 Feb 27 17:38:44 crc kubenswrapper[4751]: I0227 17:38:44.314957 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zzgjk" event={"ID":"93f0d80c-69f7-4186-9c66-d5fe7b762348","Type":"ContainerDied","Data":"77f5770afe633df52b694fa0b0f0f01ed909e4c63576545724ce565e18bb6324"} Feb 27 17:38:45 crc kubenswrapper[4751]: I0227 17:38:45.329323 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zzgjk" event={"ID":"93f0d80c-69f7-4186-9c66-d5fe7b762348","Type":"ContainerStarted","Data":"3daab00b9081fddb4e490fa0a3882808a389e566948654bdcd5f942313cce790"} Feb 27 17:38:45 crc kubenswrapper[4751]: I0227 17:38:45.368004 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-zzgjk" podStartSLOduration=3.352962789 podStartE2EDuration="2m36.367952723s" podCreationTimestamp="2026-02-27 17:36:09 +0000 UTC" firstStartedPulling="2026-02-27 17:36:11.786131947 +0000 UTC m=+4333.933146384" lastFinishedPulling="2026-02-27 17:38:44.801121831 +0000 UTC m=+4486.948136318" 
observedRunningTime="2026-02-27 17:38:45.352272563 +0000 UTC m=+4487.499287060" watchObservedRunningTime="2026-02-27 17:38:45.367952723 +0000 UTC m=+4487.514967210" Feb 27 17:38:50 crc kubenswrapper[4751]: I0227 17:38:50.054632 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-zzgjk" Feb 27 17:38:50 crc kubenswrapper[4751]: I0227 17:38:50.055025 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zzgjk" Feb 27 17:38:50 crc kubenswrapper[4751]: I0227 17:38:50.129449 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-zzgjk" Feb 27 17:38:50 crc kubenswrapper[4751]: I0227 17:38:50.449680 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zzgjk" Feb 27 17:38:50 crc kubenswrapper[4751]: I0227 17:38:50.512685 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zzgjk"] Feb 27 17:38:52 crc kubenswrapper[4751]: I0227 17:38:52.403652 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-zzgjk" podUID="93f0d80c-69f7-4186-9c66-d5fe7b762348" containerName="registry-server" containerID="cri-o://3daab00b9081fddb4e490fa0a3882808a389e566948654bdcd5f942313cce790" gracePeriod=2 Feb 27 17:38:52 crc kubenswrapper[4751]: I0227 17:38:52.887095 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zzgjk" Feb 27 17:38:52 crc kubenswrapper[4751]: I0227 17:38:52.954854 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93f0d80c-69f7-4186-9c66-d5fe7b762348-catalog-content\") pod \"93f0d80c-69f7-4186-9c66-d5fe7b762348\" (UID: \"93f0d80c-69f7-4186-9c66-d5fe7b762348\") " Feb 27 17:38:52 crc kubenswrapper[4751]: I0227 17:38:52.954985 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93f0d80c-69f7-4186-9c66-d5fe7b762348-utilities\") pod \"93f0d80c-69f7-4186-9c66-d5fe7b762348\" (UID: \"93f0d80c-69f7-4186-9c66-d5fe7b762348\") " Feb 27 17:38:52 crc kubenswrapper[4751]: I0227 17:38:52.955265 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jzq89\" (UniqueName: \"kubernetes.io/projected/93f0d80c-69f7-4186-9c66-d5fe7b762348-kube-api-access-jzq89\") pod \"93f0d80c-69f7-4186-9c66-d5fe7b762348\" (UID: \"93f0d80c-69f7-4186-9c66-d5fe7b762348\") " Feb 27 17:38:52 crc kubenswrapper[4751]: I0227 17:38:52.956207 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93f0d80c-69f7-4186-9c66-d5fe7b762348-utilities" (OuterVolumeSpecName: "utilities") pod "93f0d80c-69f7-4186-9c66-d5fe7b762348" (UID: "93f0d80c-69f7-4186-9c66-d5fe7b762348"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:38:52 crc kubenswrapper[4751]: I0227 17:38:52.963631 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93f0d80c-69f7-4186-9c66-d5fe7b762348-kube-api-access-jzq89" (OuterVolumeSpecName: "kube-api-access-jzq89") pod "93f0d80c-69f7-4186-9c66-d5fe7b762348" (UID: "93f0d80c-69f7-4186-9c66-d5fe7b762348"). 
InnerVolumeSpecName "kube-api-access-jzq89". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:38:53 crc kubenswrapper[4751]: I0227 17:38:53.056874 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93f0d80c-69f7-4186-9c66-d5fe7b762348-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 17:38:53 crc kubenswrapper[4751]: I0227 17:38:53.056923 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jzq89\" (UniqueName: \"kubernetes.io/projected/93f0d80c-69f7-4186-9c66-d5fe7b762348-kube-api-access-jzq89\") on node \"crc\" DevicePath \"\"" Feb 27 17:38:53 crc kubenswrapper[4751]: I0227 17:38:53.062876 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93f0d80c-69f7-4186-9c66-d5fe7b762348-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "93f0d80c-69f7-4186-9c66-d5fe7b762348" (UID: "93f0d80c-69f7-4186-9c66-d5fe7b762348"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:38:53 crc kubenswrapper[4751]: I0227 17:38:53.158061 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93f0d80c-69f7-4186-9c66-d5fe7b762348-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 17:38:53 crc kubenswrapper[4751]: I0227 17:38:53.417927 4751 generic.go:334] "Generic (PLEG): container finished" podID="93f0d80c-69f7-4186-9c66-d5fe7b762348" containerID="3daab00b9081fddb4e490fa0a3882808a389e566948654bdcd5f942313cce790" exitCode=0 Feb 27 17:38:53 crc kubenswrapper[4751]: I0227 17:38:53.417991 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zzgjk" event={"ID":"93f0d80c-69f7-4186-9c66-d5fe7b762348","Type":"ContainerDied","Data":"3daab00b9081fddb4e490fa0a3882808a389e566948654bdcd5f942313cce790"} Feb 27 17:38:53 crc kubenswrapper[4751]: I0227 17:38:53.418029 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zzgjk" event={"ID":"93f0d80c-69f7-4186-9c66-d5fe7b762348","Type":"ContainerDied","Data":"8df195665bbc0f7a940e6f6a0870d1687789eb742a1383fdac0244f58c185f80"} Feb 27 17:38:53 crc kubenswrapper[4751]: I0227 17:38:53.418057 4751 scope.go:117] "RemoveContainer" containerID="3daab00b9081fddb4e490fa0a3882808a389e566948654bdcd5f942313cce790" Feb 27 17:38:53 crc kubenswrapper[4751]: I0227 17:38:53.418232 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zzgjk" Feb 27 17:38:53 crc kubenswrapper[4751]: I0227 17:38:53.460829 4751 scope.go:117] "RemoveContainer" containerID="77f5770afe633df52b694fa0b0f0f01ed909e4c63576545724ce565e18bb6324" Feb 27 17:38:53 crc kubenswrapper[4751]: I0227 17:38:53.465093 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zzgjk"] Feb 27 17:38:53 crc kubenswrapper[4751]: I0227 17:38:53.475103 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-zzgjk"] Feb 27 17:38:53 crc kubenswrapper[4751]: I0227 17:38:53.496025 4751 scope.go:117] "RemoveContainer" containerID="0db8b19818b39e9b8178953dac499f58764a4457bff88a12db29d770b1740858" Feb 27 17:38:53 crc kubenswrapper[4751]: I0227 17:38:53.526890 4751 scope.go:117] "RemoveContainer" containerID="3daab00b9081fddb4e490fa0a3882808a389e566948654bdcd5f942313cce790" Feb 27 17:38:53 crc kubenswrapper[4751]: E0227 17:38:53.527358 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3daab00b9081fddb4e490fa0a3882808a389e566948654bdcd5f942313cce790\": container with ID starting with 3daab00b9081fddb4e490fa0a3882808a389e566948654bdcd5f942313cce790 not found: ID does not exist" containerID="3daab00b9081fddb4e490fa0a3882808a389e566948654bdcd5f942313cce790" Feb 27 17:38:53 crc kubenswrapper[4751]: I0227 17:38:53.527470 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3daab00b9081fddb4e490fa0a3882808a389e566948654bdcd5f942313cce790"} err="failed to get container status \"3daab00b9081fddb4e490fa0a3882808a389e566948654bdcd5f942313cce790\": rpc error: code = NotFound desc = could not find container \"3daab00b9081fddb4e490fa0a3882808a389e566948654bdcd5f942313cce790\": container with ID starting with 3daab00b9081fddb4e490fa0a3882808a389e566948654bdcd5f942313cce790 not found: ID does not exist" Feb 27 17:38:53 crc kubenswrapper[4751]: I0227 17:38:53.527509 4751 scope.go:117] "RemoveContainer" containerID="77f5770afe633df52b694fa0b0f0f01ed909e4c63576545724ce565e18bb6324" Feb 27 17:38:53 crc kubenswrapper[4751]: E0227 17:38:53.527981 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"77f5770afe633df52b694fa0b0f0f01ed909e4c63576545724ce565e18bb6324\": container with ID starting with 77f5770afe633df52b694fa0b0f0f01ed909e4c63576545724ce565e18bb6324 not found: ID does not exist" containerID="77f5770afe633df52b694fa0b0f0f01ed909e4c63576545724ce565e18bb6324" Feb 27 17:38:53 crc kubenswrapper[4751]: I0227 17:38:53.528016 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"77f5770afe633df52b694fa0b0f0f01ed909e4c63576545724ce565e18bb6324"} err="failed to get container status \"77f5770afe633df52b694fa0b0f0f01ed909e4c63576545724ce565e18bb6324\": rpc error: code = NotFound desc = could not find container \"77f5770afe633df52b694fa0b0f0f01ed909e4c63576545724ce565e18bb6324\": container with ID starting with 77f5770afe633df52b694fa0b0f0f01ed909e4c63576545724ce565e18bb6324 not found: ID does not exist" Feb 27 17:38:53 crc kubenswrapper[4751]: I0227 17:38:53.528043 4751 scope.go:117] "RemoveContainer" containerID="0db8b19818b39e9b8178953dac499f58764a4457bff88a12db29d770b1740858" Feb 27 17:38:53 crc kubenswrapper[4751]: E0227 17:38:53.528711 4751 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"0db8b19818b39e9b8178953dac499f58764a4457bff88a12db29d770b1740858\": container with ID starting with 0db8b19818b39e9b8178953dac499f58764a4457bff88a12db29d770b1740858 not found: ID does not exist" containerID="0db8b19818b39e9b8178953dac499f58764a4457bff88a12db29d770b1740858" Feb 27 17:38:53 crc kubenswrapper[4751]: I0227 17:38:53.528779 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0db8b19818b39e9b8178953dac499f58764a4457bff88a12db29d770b1740858"} err="failed to get container status \"0db8b19818b39e9b8178953dac499f58764a4457bff88a12db29d770b1740858\": rpc error: code = NotFound desc = could not find container \"0db8b19818b39e9b8178953dac499f58764a4457bff88a12db29d770b1740858\": container with ID starting with 0db8b19818b39e9b8178953dac499f58764a4457bff88a12db29d770b1740858 not found: ID does not exist" Feb 27 17:38:54 crc kubenswrapper[4751]: I0227 17:38:54.534200 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93f0d80c-69f7-4186-9c66-d5fe7b762348" path="/var/lib/kubelet/pods/93f0d80c-69f7-4186-9c66-d5fe7b762348/volumes" Feb 27 17:39:16 crc kubenswrapper[4751]: E0227 17:39:16.373254 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)" image="registry.redhat.io/openshift4/ose-cli:latest" Feb 27 17:39:16 crc kubenswrapper[4751]: E0227 17:39:16.374220 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 17:39:16 crc kubenswrapper[4751]: container &Container{Name:oc,Image:registry.redhat.io/openshift4/ose-cli:latest,Command:[/bin/bash -c oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve Feb 27 17:39:16 crc kubenswrapper[4751]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jlxqx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod auto-csr-approver-29536898-x7x5b_openshift-infra(bebb76fd-da0e-4c76-a3fe-1c31a40256fc): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error) Feb 27 17:39:16 crc kubenswrapper[4751]: > logger="UnhandledError" Feb 27 17:39:16 crc kubenswrapper[4751]: E0227 17:39:16.376706 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading 
signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:39:28 crc kubenswrapper[4751]: E0227 17:39:28.533172 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:39:43 crc kubenswrapper[4751]: E0227 17:39:43.515069 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)" image="registry.redhat.io/openshift4/ose-cli:latest" Feb 27 17:39:43 crc kubenswrapper[4751]: E0227 17:39:43.515789 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 17:39:43 crc kubenswrapper[4751]: container &Container{Name:oc,Image:registry.redhat.io/openshift4/ose-cli:latest,Command:[/bin/bash -c oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve Feb 27 17:39:43 crc kubenswrapper[4751]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jlxqx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod auto-csr-approver-29536898-x7x5b_openshift-infra(bebb76fd-da0e-4c76-a3fe-1c31a40256fc): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error) Feb 27 17:39:43 crc kubenswrapper[4751]: > logger="UnhandledError" Feb 27 17:39:43 crc kubenswrapper[4751]: E0227 17:39:43.516967 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:39:58 crc kubenswrapper[4751]: E0227 17:39:58.532908 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image 
\\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:40:00 crc kubenswrapper[4751]: I0227 17:40:00.153941 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536900-nrb6l"] Feb 27 17:40:00 crc kubenswrapper[4751]: E0227 17:40:00.154681 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93f0d80c-69f7-4186-9c66-d5fe7b762348" containerName="extract-utilities" Feb 27 17:40:00 crc kubenswrapper[4751]: I0227 17:40:00.154705 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="93f0d80c-69f7-4186-9c66-d5fe7b762348" containerName="extract-utilities" Feb 27 17:40:00 crc kubenswrapper[4751]: E0227 17:40:00.154725 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93f0d80c-69f7-4186-9c66-d5fe7b762348" containerName="registry-server" Feb 27 17:40:00 crc kubenswrapper[4751]: I0227 17:40:00.154736 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="93f0d80c-69f7-4186-9c66-d5fe7b762348" containerName="registry-server" Feb 27 17:40:00 crc kubenswrapper[4751]: E0227 17:40:00.154754 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93f0d80c-69f7-4186-9c66-d5fe7b762348" containerName="extract-content" Feb 27 17:40:00 crc kubenswrapper[4751]: I0227 17:40:00.154764 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="93f0d80c-69f7-4186-9c66-d5fe7b762348" containerName="extract-content" Feb 27 17:40:00 crc kubenswrapper[4751]: I0227 17:40:00.155054 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="93f0d80c-69f7-4186-9c66-d5fe7b762348" containerName="registry-server" Feb 27 17:40:00 crc kubenswrapper[4751]: I0227 17:40:00.155736 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536900-nrb6l" Feb 27 17:40:00 crc kubenswrapper[4751]: I0227 17:40:00.168274 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536900-nrb6l"] Feb 27 17:40:00 crc kubenswrapper[4751]: I0227 17:40:00.253187 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lx9hr\" (UniqueName: \"kubernetes.io/projected/372a6de0-a6f7-481f-a15f-8ef33633c626-kube-api-access-lx9hr\") pod \"auto-csr-approver-29536900-nrb6l\" (UID: \"372a6de0-a6f7-481f-a15f-8ef33633c626\") " pod="openshift-infra/auto-csr-approver-29536900-nrb6l" Feb 27 17:40:00 crc kubenswrapper[4751]: I0227 17:40:00.354812 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lx9hr\" (UniqueName: \"kubernetes.io/projected/372a6de0-a6f7-481f-a15f-8ef33633c626-kube-api-access-lx9hr\") pod \"auto-csr-approver-29536900-nrb6l\" (UID: \"372a6de0-a6f7-481f-a15f-8ef33633c626\") " pod="openshift-infra/auto-csr-approver-29536900-nrb6l" Feb 27 17:40:00 crc kubenswrapper[4751]: I0227 17:40:00.384735 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lx9hr\" (UniqueName: \"kubernetes.io/projected/372a6de0-a6f7-481f-a15f-8ef33633c626-kube-api-access-lx9hr\") pod \"auto-csr-approver-29536900-nrb6l\" (UID: \"372a6de0-a6f7-481f-a15f-8ef33633c626\") " pod="openshift-infra/auto-csr-approver-29536900-nrb6l" Feb 27 17:40:00 crc kubenswrapper[4751]: I0227 17:40:00.481021 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536900-nrb6l" Feb 27 17:40:00 crc kubenswrapper[4751]: I0227 17:40:00.749185 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536900-nrb6l"] Feb 27 17:40:01 crc kubenswrapper[4751]: I0227 17:40:01.109428 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536900-nrb6l" event={"ID":"372a6de0-a6f7-481f-a15f-8ef33633c626","Type":"ContainerStarted","Data":"f7129cea096f2dc3cb69a2bcdeca0439d59881e977fa69dd7487a31612b9bf60"} Feb 27 17:40:02 crc kubenswrapper[4751]: E0227 17:40:02.223817 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)" image="registry.redhat.io/openshift4/ose-cli:latest" Feb 27 17:40:02 crc kubenswrapper[4751]: E0227 17:40:02.223999 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 17:40:02 crc kubenswrapper[4751]: container &Container{Name:oc,Image:registry.redhat.io/openshift4/ose-cli:latest,Command:[/bin/bash -c oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve Feb 27 17:40:02 crc kubenswrapper[4751]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lx9hr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod auto-csr-approver-29536900-nrb6l_openshift-infra(372a6de0-a6f7-481f-a15f-8ef33633c626): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error) Feb 27 17:40:02 crc kubenswrapper[4751]: > logger="UnhandledError" Feb 27 17:40:02 crc kubenswrapper[4751]: E0227 17:40:02.226092 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)\"" pod="openshift-infra/auto-csr-approver-29536900-nrb6l" podUID="372a6de0-a6f7-481f-a15f-8ef33633c626" Feb 27 17:40:03 crc kubenswrapper[4751]: E0227 17:40:03.128167 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" 
pod="openshift-infra/auto-csr-approver-29536900-nrb6l" podUID="372a6de0-a6f7-481f-a15f-8ef33633c626" Feb 27 17:40:12 crc kubenswrapper[4751]: E0227 17:40:12.523675 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:40:16 crc kubenswrapper[4751]: E0227 17:40:16.327741 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)" image="registry.redhat.io/openshift4/ose-cli:latest" Feb 27 17:40:16 crc kubenswrapper[4751]: E0227 17:40:16.328326 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 17:40:16 crc kubenswrapper[4751]: container &Container{Name:oc,Image:registry.redhat.io/openshift4/ose-cli:latest,Command:[/bin/bash -c oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve Feb 27 17:40:16 crc kubenswrapper[4751]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lx9hr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod auto-csr-approver-29536900-nrb6l_openshift-infra(372a6de0-a6f7-481f-a15f-8ef33633c626): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error) Feb 27 17:40:16 crc kubenswrapper[4751]: > logger="UnhandledError" Feb 27 17:40:16 crc kubenswrapper[4751]: E0227 17:40:16.330053 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)\"" pod="openshift-infra/auto-csr-approver-29536900-nrb6l" podUID="372a6de0-a6f7-481f-a15f-8ef33633c626" Feb 27 17:40:24 crc kubenswrapper[4751]: E0227 17:40:24.359062 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server 
Error)" image="registry.redhat.io/openshift4/ose-cli:latest" Feb 27 17:40:24 crc kubenswrapper[4751]: E0227 17:40:24.359556 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 17:40:24 crc kubenswrapper[4751]: container &Container{Name:oc,Image:registry.redhat.io/openshift4/ose-cli:latest,Command:[/bin/bash -c oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve Feb 27 17:40:24 crc kubenswrapper[4751]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jlxqx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod auto-csr-approver-29536898-x7x5b_openshift-infra(bebb76fd-da0e-4c76-a3fe-1c31a40256fc): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error) Feb 27 17:40:24 crc kubenswrapper[4751]: > logger="UnhandledError" Feb 27 17:40:24 crc kubenswrapper[4751]: E0227 17:40:24.360757 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:40:28 crc kubenswrapper[4751]: I0227 17:40:28.918473 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:40:28 crc kubenswrapper[4751]: I0227 17:40:28.918972 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:40:29 crc kubenswrapper[4751]: E0227 17:40:29.523492 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536900-nrb6l" podUID="372a6de0-a6f7-481f-a15f-8ef33633c626" Feb 27 17:40:38 crc kubenswrapper[4751]: E0227 17:40:38.528895 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: 
\"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:40:41 crc kubenswrapper[4751]: I0227 17:40:41.198802 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-b7swh"] Feb 27 17:40:41 crc kubenswrapper[4751]: I0227 17:40:41.201932 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-b7swh" Feb 27 17:40:41 crc kubenswrapper[4751]: I0227 17:40:41.221758 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-b7swh"] Feb 27 17:40:41 crc kubenswrapper[4751]: I0227 17:40:41.348643 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fa2b8c3-b236-45df-aecb-d5abdb1d549f-catalog-content\") pod \"certified-operators-b7swh\" (UID: \"6fa2b8c3-b236-45df-aecb-d5abdb1d549f\") " pod="openshift-marketplace/certified-operators-b7swh" Feb 27 17:40:41 crc kubenswrapper[4751]: I0227 17:40:41.348814 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdt42\" (UniqueName: \"kubernetes.io/projected/6fa2b8c3-b236-45df-aecb-d5abdb1d549f-kube-api-access-bdt42\") pod \"certified-operators-b7swh\" (UID: \"6fa2b8c3-b236-45df-aecb-d5abdb1d549f\") " pod="openshift-marketplace/certified-operators-b7swh" Feb 27 17:40:41 crc kubenswrapper[4751]: I0227 17:40:41.349024 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fa2b8c3-b236-45df-aecb-d5abdb1d549f-utilities\") pod \"certified-operators-b7swh\" (UID: \"6fa2b8c3-b236-45df-aecb-d5abdb1d549f\") " pod="openshift-marketplace/certified-operators-b7swh" Feb 27 17:40:41 crc kubenswrapper[4751]: I0227 17:40:41.449959 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdt42\" (UniqueName: \"kubernetes.io/projected/6fa2b8c3-b236-45df-aecb-d5abdb1d549f-kube-api-access-bdt42\") pod \"certified-operators-b7swh\" (UID: \"6fa2b8c3-b236-45df-aecb-d5abdb1d549f\") " pod="openshift-marketplace/certified-operators-b7swh" Feb 27 17:40:41 crc kubenswrapper[4751]: I0227 17:40:41.450235 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fa2b8c3-b236-45df-aecb-d5abdb1d549f-utilities\") pod \"certified-operators-b7swh\" (UID: \"6fa2b8c3-b236-45df-aecb-d5abdb1d549f\") " pod="openshift-marketplace/certified-operators-b7swh" Feb 27 17:40:41 crc kubenswrapper[4751]: I0227 17:40:41.450268 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fa2b8c3-b236-45df-aecb-d5abdb1d549f-catalog-content\") pod \"certified-operators-b7swh\" (UID: \"6fa2b8c3-b236-45df-aecb-d5abdb1d549f\") " pod="openshift-marketplace/certified-operators-b7swh" Feb 27 17:40:41 crc kubenswrapper[4751]: I0227 17:40:41.450800 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fa2b8c3-b236-45df-aecb-d5abdb1d549f-catalog-content\") pod \"certified-operators-b7swh\" (UID: \"6fa2b8c3-b236-45df-aecb-d5abdb1d549f\") " pod="openshift-marketplace/certified-operators-b7swh" Feb 27 
17:40:41 crc kubenswrapper[4751]: I0227 17:40:41.450968 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fa2b8c3-b236-45df-aecb-d5abdb1d549f-utilities\") pod \"certified-operators-b7swh\" (UID: \"6fa2b8c3-b236-45df-aecb-d5abdb1d549f\") " pod="openshift-marketplace/certified-operators-b7swh" Feb 27 17:40:41 crc kubenswrapper[4751]: I0227 17:40:41.473777 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bdt42\" (UniqueName: \"kubernetes.io/projected/6fa2b8c3-b236-45df-aecb-d5abdb1d549f-kube-api-access-bdt42\") pod \"certified-operators-b7swh\" (UID: \"6fa2b8c3-b236-45df-aecb-d5abdb1d549f\") " pod="openshift-marketplace/certified-operators-b7swh" Feb 27 17:40:41 crc kubenswrapper[4751]: I0227 17:40:41.526302 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-b7swh" Feb 27 17:40:42 crc kubenswrapper[4751]: I0227 17:40:42.021531 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-b7swh"] Feb 27 17:40:42 crc kubenswrapper[4751]: I0227 17:40:42.486888 4751 generic.go:334] "Generic (PLEG): container finished" podID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" containerID="d48c30d0e39febbcb76690cbc833ad0597dddc23fb1c6928c76ea3f7d912f6bd" exitCode=0 Feb 27 17:40:42 crc kubenswrapper[4751]: I0227 17:40:42.486940 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b7swh" event={"ID":"6fa2b8c3-b236-45df-aecb-d5abdb1d549f","Type":"ContainerDied","Data":"d48c30d0e39febbcb76690cbc833ad0597dddc23fb1c6928c76ea3f7d912f6bd"} Feb 27 17:40:42 crc kubenswrapper[4751]: I0227 17:40:42.487238 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b7swh" event={"ID":"6fa2b8c3-b236-45df-aecb-d5abdb1d549f","Type":"ContainerStarted","Data":"5185277d3a7e00c60ae7c952e414cb7340703c69cc1094248a7fc94bec8be32b"} Feb 27 17:40:43 crc kubenswrapper[4751]: E0227 17:40:43.290913 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/certified-operator-index@sha256=625372062485d8ed1e4e84c388a7d036cb39c1b93d8c56dd3418fce0c028b62b/signature-2: status 500 (Internal Server Error)" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Feb 27 17:40:43 crc kubenswrapper[4751]: E0227 17:40:43.291080 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bdt42,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-b7swh_openshift-marketplace(6fa2b8c3-b236-45df-aecb-d5abdb1d549f): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/certified-operator-index@sha256=625372062485d8ed1e4e84c388a7d036cb39c1b93d8c56dd3418fce0c028b62b/signature-2: status 500 (Internal Server Error)" logger="UnhandledError" Feb 27 17:40:43 crc kubenswrapper[4751]: E0227 17:40:43.292325 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/certified-operator-index@sha256=625372062485d8ed1e4e84c388a7d036cb39c1b93d8c56dd3418fce0c028b62b/signature-2: status 500 (Internal Server Error)\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:40:43 crc kubenswrapper[4751]: I0227 17:40:43.498795 4751 generic.go:334] "Generic (PLEG): container finished" podID="372a6de0-a6f7-481f-a15f-8ef33633c626" containerID="2785287c5ebfd01517f402eaee5a9bd610daf9fc8bab8c8695da960a70e2b77c" exitCode=0 Feb 27 17:40:43 crc kubenswrapper[4751]: I0227 17:40:43.498887 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536900-nrb6l" event={"ID":"372a6de0-a6f7-481f-a15f-8ef33633c626","Type":"ContainerDied","Data":"2785287c5ebfd01517f402eaee5a9bd610daf9fc8bab8c8695da960a70e2b77c"} Feb 27 17:40:43 crc kubenswrapper[4751]: E0227 17:40:43.502213 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:40:44 crc kubenswrapper[4751]: I0227 17:40:44.881300 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536900-nrb6l" Feb 27 17:40:44 crc kubenswrapper[4751]: I0227 17:40:44.928169 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lx9hr\" (UniqueName: \"kubernetes.io/projected/372a6de0-a6f7-481f-a15f-8ef33633c626-kube-api-access-lx9hr\") pod \"372a6de0-a6f7-481f-a15f-8ef33633c626\" (UID: \"372a6de0-a6f7-481f-a15f-8ef33633c626\") " Feb 27 17:40:44 crc kubenswrapper[4751]: I0227 17:40:44.936803 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/372a6de0-a6f7-481f-a15f-8ef33633c626-kube-api-access-lx9hr" (OuterVolumeSpecName: "kube-api-access-lx9hr") pod "372a6de0-a6f7-481f-a15f-8ef33633c626" (UID: "372a6de0-a6f7-481f-a15f-8ef33633c626"). InnerVolumeSpecName "kube-api-access-lx9hr". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:40:45 crc kubenswrapper[4751]: I0227 17:40:45.030366 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lx9hr\" (UniqueName: \"kubernetes.io/projected/372a6de0-a6f7-481f-a15f-8ef33633c626-kube-api-access-lx9hr\") on node \"crc\" DevicePath \"\"" Feb 27 17:40:45 crc kubenswrapper[4751]: I0227 17:40:45.521912 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536900-nrb6l" event={"ID":"372a6de0-a6f7-481f-a15f-8ef33633c626","Type":"ContainerDied","Data":"f7129cea096f2dc3cb69a2bcdeca0439d59881e977fa69dd7487a31612b9bf60"} Feb 27 17:40:45 crc kubenswrapper[4751]: I0227 17:40:45.521944 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536900-nrb6l" Feb 27 17:40:45 crc kubenswrapper[4751]: I0227 17:40:45.521963 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f7129cea096f2dc3cb69a2bcdeca0439d59881e977fa69dd7487a31612b9bf60" Feb 27 17:40:45 crc kubenswrapper[4751]: I0227 17:40:45.971499 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536892-vxgb4"] Feb 27 17:40:45 crc kubenswrapper[4751]: I0227 17:40:45.981122 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536892-vxgb4"] Feb 27 17:40:46 crc kubenswrapper[4751]: I0227 17:40:46.537233 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac429ae1-d578-4812-8334-d7a7c1cf395a" path="/var/lib/kubelet/pods/ac429ae1-d578-4812-8334-d7a7c1cf395a/volumes" Feb 27 17:40:50 crc kubenswrapper[4751]: E0227 17:40:50.524073 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:40:51 crc kubenswrapper[4751]: I0227 17:40:51.618539 4751 scope.go:117] "RemoveContainer" containerID="4e7db06f26240c17c03072f1f79f2f90dd5dfd91a27cd71d717cdde158c87d93" Feb 27 17:40:53 crc kubenswrapper[4751]: I0227 17:40:53.924231 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wswp4"] Feb 27 17:40:53 crc kubenswrapper[4751]: E0227 17:40:53.925276 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="372a6de0-a6f7-481f-a15f-8ef33633c626" containerName="oc" Feb 27 17:40:53 crc kubenswrapper[4751]: I0227 17:40:53.925306 4751 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="372a6de0-a6f7-481f-a15f-8ef33633c626" containerName="oc" Feb 27 17:40:53 crc kubenswrapper[4751]: I0227 17:40:53.925710 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="372a6de0-a6f7-481f-a15f-8ef33633c626" containerName="oc" Feb 27 17:40:53 crc kubenswrapper[4751]: I0227 17:40:53.928082 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wswp4" Feb 27 17:40:53 crc kubenswrapper[4751]: I0227 17:40:53.947000 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wswp4"] Feb 27 17:40:54 crc kubenswrapper[4751]: I0227 17:40:54.087584 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bbfd9e1d-d4ac-4643-945e-cea1932ec4a1-utilities\") pod \"redhat-operators-wswp4\" (UID: \"bbfd9e1d-d4ac-4643-945e-cea1932ec4a1\") " pod="openshift-marketplace/redhat-operators-wswp4" Feb 27 17:40:54 crc kubenswrapper[4751]: I0227 17:40:54.087714 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7fb5\" (UniqueName: \"kubernetes.io/projected/bbfd9e1d-d4ac-4643-945e-cea1932ec4a1-kube-api-access-p7fb5\") pod \"redhat-operators-wswp4\" (UID: \"bbfd9e1d-d4ac-4643-945e-cea1932ec4a1\") " pod="openshift-marketplace/redhat-operators-wswp4" Feb 27 17:40:54 crc kubenswrapper[4751]: I0227 17:40:54.087861 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bbfd9e1d-d4ac-4643-945e-cea1932ec4a1-catalog-content\") pod \"redhat-operators-wswp4\" (UID: \"bbfd9e1d-d4ac-4643-945e-cea1932ec4a1\") " pod="openshift-marketplace/redhat-operators-wswp4" Feb 27 17:40:54 crc kubenswrapper[4751]: I0227 17:40:54.189021 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bbfd9e1d-d4ac-4643-945e-cea1932ec4a1-utilities\") pod \"redhat-operators-wswp4\" (UID: \"bbfd9e1d-d4ac-4643-945e-cea1932ec4a1\") " pod="openshift-marketplace/redhat-operators-wswp4" Feb 27 17:40:54 crc kubenswrapper[4751]: I0227 17:40:54.189101 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7fb5\" (UniqueName: \"kubernetes.io/projected/bbfd9e1d-d4ac-4643-945e-cea1932ec4a1-kube-api-access-p7fb5\") pod \"redhat-operators-wswp4\" (UID: \"bbfd9e1d-d4ac-4643-945e-cea1932ec4a1\") " pod="openshift-marketplace/redhat-operators-wswp4" Feb 27 17:40:54 crc kubenswrapper[4751]: I0227 17:40:54.189189 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bbfd9e1d-d4ac-4643-945e-cea1932ec4a1-catalog-content\") pod \"redhat-operators-wswp4\" (UID: \"bbfd9e1d-d4ac-4643-945e-cea1932ec4a1\") " pod="openshift-marketplace/redhat-operators-wswp4" Feb 27 17:40:54 crc kubenswrapper[4751]: I0227 17:40:54.190041 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bbfd9e1d-d4ac-4643-945e-cea1932ec4a1-utilities\") pod \"redhat-operators-wswp4\" (UID: \"bbfd9e1d-d4ac-4643-945e-cea1932ec4a1\") " pod="openshift-marketplace/redhat-operators-wswp4" Feb 27 17:40:54 crc kubenswrapper[4751]: I0227 17:40:54.190261 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" 
(UniqueName: \"kubernetes.io/empty-dir/bbfd9e1d-d4ac-4643-945e-cea1932ec4a1-catalog-content\") pod \"redhat-operators-wswp4\" (UID: \"bbfd9e1d-d4ac-4643-945e-cea1932ec4a1\") " pod="openshift-marketplace/redhat-operators-wswp4" Feb 27 17:40:54 crc kubenswrapper[4751]: I0227 17:40:54.212945 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7fb5\" (UniqueName: \"kubernetes.io/projected/bbfd9e1d-d4ac-4643-945e-cea1932ec4a1-kube-api-access-p7fb5\") pod \"redhat-operators-wswp4\" (UID: \"bbfd9e1d-d4ac-4643-945e-cea1932ec4a1\") " pod="openshift-marketplace/redhat-operators-wswp4" Feb 27 17:40:54 crc kubenswrapper[4751]: I0227 17:40:54.269743 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wswp4" Feb 27 17:40:54 crc kubenswrapper[4751]: I0227 17:40:54.777585 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wswp4"] Feb 27 17:40:55 crc kubenswrapper[4751]: E0227 17:40:55.080828 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/certified-operator-index@sha256=625372062485d8ed1e4e84c388a7d036cb39c1b93d8c56dd3418fce0c028b62b/signature-2: status 500 (Internal Server Error)" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Feb 27 17:40:55 crc kubenswrapper[4751]: E0227 17:40:55.081008 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bdt42,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-b7swh_openshift-marketplace(6fa2b8c3-b236-45df-aecb-d5abdb1d549f): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/certified-operator-index@sha256=625372062485d8ed1e4e84c388a7d036cb39c1b93d8c56dd3418fce0c028b62b/signature-2: status 500 (Internal Server 
Error)" logger="UnhandledError" Feb 27 17:40:55 crc kubenswrapper[4751]: E0227 17:40:55.082202 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/certified-operator-index@sha256=625372062485d8ed1e4e84c388a7d036cb39c1b93d8c56dd3418fce0c028b62b/signature-2: status 500 (Internal Server Error)\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:40:55 crc kubenswrapper[4751]: I0227 17:40:55.623304 4751 generic.go:334] "Generic (PLEG): container finished" podID="bbfd9e1d-d4ac-4643-945e-cea1932ec4a1" containerID="d9a7b0a0aff20f914d7aa208c4474fae359c04d2bc2b1ec7d0dbf698514053a9" exitCode=0 Feb 27 17:40:55 crc kubenswrapper[4751]: I0227 17:40:55.623376 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wswp4" event={"ID":"bbfd9e1d-d4ac-4643-945e-cea1932ec4a1","Type":"ContainerDied","Data":"d9a7b0a0aff20f914d7aa208c4474fae359c04d2bc2b1ec7d0dbf698514053a9"} Feb 27 17:40:55 crc kubenswrapper[4751]: I0227 17:40:55.623611 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wswp4" event={"ID":"bbfd9e1d-d4ac-4643-945e-cea1932ec4a1","Type":"ContainerStarted","Data":"fa3d0c9ec7800a9b6fd1265c948f692a896765ac47e0e47b02dac27aed84d5da"} Feb 27 17:40:56 crc kubenswrapper[4751]: E0227 17:40:56.436241 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/redhat-operator-index@sha256=340dbaa786c584e5ffe05a0f79571b9c2fe7d16a1a1fb390e5d83b437d7a1ff3/signature-3: status 500 (Internal Server Error)" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Feb 27 17:40:56 crc kubenswrapper[4751]: E0227 17:40:56.436424 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-p7fb5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-wswp4_openshift-marketplace(bbfd9e1d-d4ac-4643-945e-cea1932ec4a1): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/redhat-operator-index@sha256=340dbaa786c584e5ffe05a0f79571b9c2fe7d16a1a1fb390e5d83b437d7a1ff3/signature-3: status 500 (Internal Server Error)" logger="UnhandledError" Feb 27 17:40:56 crc kubenswrapper[4751]: E0227 17:40:56.437702 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/redhat-operator-index@sha256=340dbaa786c584e5ffe05a0f79571b9c2fe7d16a1a1fb390e5d83b437d7a1ff3/signature-3: status 500 (Internal Server Error)\"" pod="openshift-marketplace/redhat-operators-wswp4" podUID="bbfd9e1d-d4ac-4643-945e-cea1932ec4a1" Feb 27 17:40:56 crc kubenswrapper[4751]: E0227 17:40:56.633707 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-wswp4" podUID="bbfd9e1d-d4ac-4643-945e-cea1932ec4a1" Feb 27 17:40:58 crc kubenswrapper[4751]: I0227 17:40:58.918070 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:40:58 crc kubenswrapper[4751]: I0227 17:40:58.918501 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:41:05 crc kubenswrapper[4751]: E0227 17:41:05.524820 4751 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:41:08 crc kubenswrapper[4751]: E0227 17:41:08.532425 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:41:11 crc kubenswrapper[4751]: I0227 17:41:11.521745 4751 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 27 17:41:13 crc kubenswrapper[4751]: I0227 17:41:13.787602 4751 generic.go:334] "Generic (PLEG): container finished" podID="bbfd9e1d-d4ac-4643-945e-cea1932ec4a1" containerID="b21d53846dfbe122472c480d08beffd14e68a603fff8a2c53c643d432107a04b" exitCode=0 Feb 27 17:41:13 crc kubenswrapper[4751]: I0227 17:41:13.787707 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wswp4" event={"ID":"bbfd9e1d-d4ac-4643-945e-cea1932ec4a1","Type":"ContainerDied","Data":"b21d53846dfbe122472c480d08beffd14e68a603fff8a2c53c643d432107a04b"} Feb 27 17:41:15 crc kubenswrapper[4751]: I0227 17:41:15.812224 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wswp4" event={"ID":"bbfd9e1d-d4ac-4643-945e-cea1932ec4a1","Type":"ContainerStarted","Data":"70eb7dfa8053d3225c7a17cfe91c951fb19cefb522d294158e0d40675172f058"} Feb 27 17:41:15 crc kubenswrapper[4751]: I0227 17:41:15.843554 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wswp4" podStartSLOduration=4.203319289 podStartE2EDuration="22.84351732s" podCreationTimestamp="2026-02-27 17:40:53 +0000 UTC" firstStartedPulling="2026-02-27 17:40:55.625294476 +0000 UTC m=+4617.772308933" lastFinishedPulling="2026-02-27 17:41:14.265492517 +0000 UTC m=+4636.412506964" observedRunningTime="2026-02-27 17:41:15.841932988 +0000 UTC m=+4637.988947445" watchObservedRunningTime="2026-02-27 17:41:15.84351732 +0000 UTC m=+4637.990531857" Feb 27 17:41:19 crc kubenswrapper[4751]: E0227 17:41:19.522858 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:41:20 crc kubenswrapper[4751]: E0227 17:41:20.514018 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/certified-operator-index@sha256=625372062485d8ed1e4e84c388a7d036cb39c1b93d8c56dd3418fce0c028b62b/signature-2: status 500 (Internal Server Error)" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Feb 27 17:41:20 crc kubenswrapper[4751]: E0227 17:41:20.514448 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog 
--cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bdt42,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-b7swh_openshift-marketplace(6fa2b8c3-b236-45df-aecb-d5abdb1d549f): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/certified-operator-index@sha256=625372062485d8ed1e4e84c388a7d036cb39c1b93d8c56dd3418fce0c028b62b/signature-2: status 500 (Internal Server Error)" logger="UnhandledError" Feb 27 17:41:20 crc kubenswrapper[4751]: E0227 17:41:20.516388 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/certified-operator-index@sha256=625372062485d8ed1e4e84c388a7d036cb39c1b93d8c56dd3418fce0c028b62b/signature-2: status 500 (Internal Server Error)\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:41:24 crc kubenswrapper[4751]: I0227 17:41:24.270991 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wswp4" Feb 27 17:41:24 crc kubenswrapper[4751]: I0227 17:41:24.271071 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wswp4" Feb 27 17:41:24 crc kubenswrapper[4751]: I0227 17:41:24.315974 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wswp4" Feb 27 17:41:24 crc kubenswrapper[4751]: I0227 17:41:24.957658 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wswp4" Feb 27 17:41:25 crc kubenswrapper[4751]: I0227 17:41:25.115678 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wswp4"] Feb 27 17:41:26 crc kubenswrapper[4751]: I0227 17:41:26.899535 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wswp4" podUID="bbfd9e1d-d4ac-4643-945e-cea1932ec4a1" containerName="registry-server" 
containerID="cri-o://70eb7dfa8053d3225c7a17cfe91c951fb19cefb522d294158e0d40675172f058" gracePeriod=2 Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.406891 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wswp4" Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.532927 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bbfd9e1d-d4ac-4643-945e-cea1932ec4a1-catalog-content\") pod \"bbfd9e1d-d4ac-4643-945e-cea1932ec4a1\" (UID: \"bbfd9e1d-d4ac-4643-945e-cea1932ec4a1\") " Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.532977 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p7fb5\" (UniqueName: \"kubernetes.io/projected/bbfd9e1d-d4ac-4643-945e-cea1932ec4a1-kube-api-access-p7fb5\") pod \"bbfd9e1d-d4ac-4643-945e-cea1932ec4a1\" (UID: \"bbfd9e1d-d4ac-4643-945e-cea1932ec4a1\") " Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.533045 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bbfd9e1d-d4ac-4643-945e-cea1932ec4a1-utilities\") pod \"bbfd9e1d-d4ac-4643-945e-cea1932ec4a1\" (UID: \"bbfd9e1d-d4ac-4643-945e-cea1932ec4a1\") " Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.534097 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bbfd9e1d-d4ac-4643-945e-cea1932ec4a1-utilities" (OuterVolumeSpecName: "utilities") pod "bbfd9e1d-d4ac-4643-945e-cea1932ec4a1" (UID: "bbfd9e1d-d4ac-4643-945e-cea1932ec4a1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.543612 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bbfd9e1d-d4ac-4643-945e-cea1932ec4a1-kube-api-access-p7fb5" (OuterVolumeSpecName: "kube-api-access-p7fb5") pod "bbfd9e1d-d4ac-4643-945e-cea1932ec4a1" (UID: "bbfd9e1d-d4ac-4643-945e-cea1932ec4a1"). InnerVolumeSpecName "kube-api-access-p7fb5". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.634973 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p7fb5\" (UniqueName: \"kubernetes.io/projected/bbfd9e1d-d4ac-4643-945e-cea1932ec4a1-kube-api-access-p7fb5\") on node \"crc\" DevicePath \"\"" Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.635009 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bbfd9e1d-d4ac-4643-945e-cea1932ec4a1-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.699689 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bbfd9e1d-d4ac-4643-945e-cea1932ec4a1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bbfd9e1d-d4ac-4643-945e-cea1932ec4a1" (UID: "bbfd9e1d-d4ac-4643-945e-cea1932ec4a1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.737238 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bbfd9e1d-d4ac-4643-945e-cea1932ec4a1-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.909740 4751 generic.go:334] "Generic (PLEG): container finished" podID="bbfd9e1d-d4ac-4643-945e-cea1932ec4a1" containerID="70eb7dfa8053d3225c7a17cfe91c951fb19cefb522d294158e0d40675172f058" exitCode=0 Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.909790 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wswp4" event={"ID":"bbfd9e1d-d4ac-4643-945e-cea1932ec4a1","Type":"ContainerDied","Data":"70eb7dfa8053d3225c7a17cfe91c951fb19cefb522d294158e0d40675172f058"} Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.909835 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wswp4" event={"ID":"bbfd9e1d-d4ac-4643-945e-cea1932ec4a1","Type":"ContainerDied","Data":"fa3d0c9ec7800a9b6fd1265c948f692a896765ac47e0e47b02dac27aed84d5da"} Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.909832 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wswp4" Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.909858 4751 scope.go:117] "RemoveContainer" containerID="70eb7dfa8053d3225c7a17cfe91c951fb19cefb522d294158e0d40675172f058" Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.941853 4751 scope.go:117] "RemoveContainer" containerID="b21d53846dfbe122472c480d08beffd14e68a603fff8a2c53c643d432107a04b" Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.970497 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wswp4"] Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.972610 4751 scope.go:117] "RemoveContainer" containerID="d9a7b0a0aff20f914d7aa208c4474fae359c04d2bc2b1ec7d0dbf698514053a9" Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.988333 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wswp4"] Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.993595 4751 scope.go:117] "RemoveContainer" containerID="70eb7dfa8053d3225c7a17cfe91c951fb19cefb522d294158e0d40675172f058" Feb 27 17:41:27 crc kubenswrapper[4751]: E0227 17:41:27.994044 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70eb7dfa8053d3225c7a17cfe91c951fb19cefb522d294158e0d40675172f058\": container with ID starting with 70eb7dfa8053d3225c7a17cfe91c951fb19cefb522d294158e0d40675172f058 not found: ID does not exist" containerID="70eb7dfa8053d3225c7a17cfe91c951fb19cefb522d294158e0d40675172f058" Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.994079 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70eb7dfa8053d3225c7a17cfe91c951fb19cefb522d294158e0d40675172f058"} err="failed to get container status \"70eb7dfa8053d3225c7a17cfe91c951fb19cefb522d294158e0d40675172f058\": rpc error: code = NotFound desc = could not find container \"70eb7dfa8053d3225c7a17cfe91c951fb19cefb522d294158e0d40675172f058\": container with ID starting with 70eb7dfa8053d3225c7a17cfe91c951fb19cefb522d294158e0d40675172f058 not found: ID does not exist" Feb 27 17:41:27 crc 
kubenswrapper[4751]: I0227 17:41:27.994103 4751 scope.go:117] "RemoveContainer" containerID="b21d53846dfbe122472c480d08beffd14e68a603fff8a2c53c643d432107a04b" Feb 27 17:41:27 crc kubenswrapper[4751]: E0227 17:41:27.995024 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b21d53846dfbe122472c480d08beffd14e68a603fff8a2c53c643d432107a04b\": container with ID starting with b21d53846dfbe122472c480d08beffd14e68a603fff8a2c53c643d432107a04b not found: ID does not exist" containerID="b21d53846dfbe122472c480d08beffd14e68a603fff8a2c53c643d432107a04b" Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.995060 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b21d53846dfbe122472c480d08beffd14e68a603fff8a2c53c643d432107a04b"} err="failed to get container status \"b21d53846dfbe122472c480d08beffd14e68a603fff8a2c53c643d432107a04b\": rpc error: code = NotFound desc = could not find container \"b21d53846dfbe122472c480d08beffd14e68a603fff8a2c53c643d432107a04b\": container with ID starting with b21d53846dfbe122472c480d08beffd14e68a603fff8a2c53c643d432107a04b not found: ID does not exist" Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.995077 4751 scope.go:117] "RemoveContainer" containerID="d9a7b0a0aff20f914d7aa208c4474fae359c04d2bc2b1ec7d0dbf698514053a9" Feb 27 17:41:27 crc kubenswrapper[4751]: E0227 17:41:27.995460 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9a7b0a0aff20f914d7aa208c4474fae359c04d2bc2b1ec7d0dbf698514053a9\": container with ID starting with d9a7b0a0aff20f914d7aa208c4474fae359c04d2bc2b1ec7d0dbf698514053a9 not found: ID does not exist" containerID="d9a7b0a0aff20f914d7aa208c4474fae359c04d2bc2b1ec7d0dbf698514053a9" Feb 27 17:41:27 crc kubenswrapper[4751]: I0227 17:41:27.995487 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9a7b0a0aff20f914d7aa208c4474fae359c04d2bc2b1ec7d0dbf698514053a9"} err="failed to get container status \"d9a7b0a0aff20f914d7aa208c4474fae359c04d2bc2b1ec7d0dbf698514053a9\": rpc error: code = NotFound desc = could not find container \"d9a7b0a0aff20f914d7aa208c4474fae359c04d2bc2b1ec7d0dbf698514053a9\": container with ID starting with d9a7b0a0aff20f914d7aa208c4474fae359c04d2bc2b1ec7d0dbf698514053a9 not found: ID does not exist" Feb 27 17:41:28 crc kubenswrapper[4751]: I0227 17:41:28.531745 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bbfd9e1d-d4ac-4643-945e-cea1932ec4a1" path="/var/lib/kubelet/pods/bbfd9e1d-d4ac-4643-945e-cea1932ec4a1/volumes" Feb 27 17:41:28 crc kubenswrapper[4751]: I0227 17:41:28.918614 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:41:28 crc kubenswrapper[4751]: I0227 17:41:28.918723 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:41:28 crc kubenswrapper[4751]: I0227 17:41:28.918807 4751 kubelet.go:2542] "SyncLoop (probe)" 
probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 17:41:28 crc kubenswrapper[4751]: I0227 17:41:28.920030 4751 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6"} pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 27 17:41:28 crc kubenswrapper[4751]: I0227 17:41:28.920180 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" containerID="cri-o://5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" gracePeriod=600 Feb 27 17:41:29 crc kubenswrapper[4751]: E0227 17:41:29.060455 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:41:29 crc kubenswrapper[4751]: I0227 17:41:29.943095 4751 generic.go:334] "Generic (PLEG): container finished" podID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" exitCode=0 Feb 27 17:41:29 crc kubenswrapper[4751]: I0227 17:41:29.943223 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerDied","Data":"5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6"} Feb 27 17:41:29 crc kubenswrapper[4751]: I0227 17:41:29.943567 4751 scope.go:117] "RemoveContainer" containerID="8b57134518086cc1b0e253e40a838c2eac9f5ca33579d3a46206b1507590a014" Feb 27 17:41:29 crc kubenswrapper[4751]: I0227 17:41:29.944473 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:41:29 crc kubenswrapper[4751]: E0227 17:41:29.944880 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:41:30 crc kubenswrapper[4751]: E0227 17:41:30.522483 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:41:33 crc kubenswrapper[4751]: E0227 17:41:33.522086 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" 
pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:41:41 crc kubenswrapper[4751]: E0227 17:41:41.523232 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:41:44 crc kubenswrapper[4751]: I0227 17:41:44.521196 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:41:44 crc kubenswrapper[4751]: E0227 17:41:44.521712 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:41:45 crc kubenswrapper[4751]: E0227 17:41:45.522552 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:41:55 crc kubenswrapper[4751]: E0227 17:41:55.763996 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)" image="registry.redhat.io/openshift4/ose-cli:latest" Feb 27 17:41:55 crc kubenswrapper[4751]: E0227 17:41:55.764672 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 17:41:55 crc kubenswrapper[4751]: container &Container{Name:oc,Image:registry.redhat.io/openshift4/ose-cli:latest,Command:[/bin/bash -c oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve Feb 27 17:41:55 crc kubenswrapper[4751]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jlxqx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod auto-csr-approver-29536898-x7x5b_openshift-infra(bebb76fd-da0e-4c76-a3fe-1c31a40256fc): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 
(Internal Server Error) Feb 27 17:41:55 crc kubenswrapper[4751]: > logger="UnhandledError" Feb 27 17:41:55 crc kubenswrapper[4751]: E0227 17:41:55.765981 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:41:56 crc kubenswrapper[4751]: I0227 17:41:56.521153 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:41:56 crc kubenswrapper[4751]: E0227 17:41:56.521951 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:41:58 crc kubenswrapper[4751]: E0227 17:41:58.532799 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:42:00 crc kubenswrapper[4751]: I0227 17:42:00.183511 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536902-ljzg6"] Feb 27 17:42:00 crc kubenswrapper[4751]: E0227 17:42:00.184602 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbfd9e1d-d4ac-4643-945e-cea1932ec4a1" containerName="extract-utilities" Feb 27 17:42:00 crc kubenswrapper[4751]: I0227 17:42:00.184640 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbfd9e1d-d4ac-4643-945e-cea1932ec4a1" containerName="extract-utilities" Feb 27 17:42:00 crc kubenswrapper[4751]: E0227 17:42:00.184663 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbfd9e1d-d4ac-4643-945e-cea1932ec4a1" containerName="extract-content" Feb 27 17:42:00 crc kubenswrapper[4751]: I0227 17:42:00.184680 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbfd9e1d-d4ac-4643-945e-cea1932ec4a1" containerName="extract-content" Feb 27 17:42:00 crc kubenswrapper[4751]: E0227 17:42:00.184747 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbfd9e1d-d4ac-4643-945e-cea1932ec4a1" containerName="registry-server" Feb 27 17:42:00 crc kubenswrapper[4751]: I0227 17:42:00.184765 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbfd9e1d-d4ac-4643-945e-cea1932ec4a1" containerName="registry-server" Feb 27 17:42:00 crc kubenswrapper[4751]: I0227 17:42:00.185045 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="bbfd9e1d-d4ac-4643-945e-cea1932ec4a1" containerName="registry-server" Feb 27 17:42:00 crc kubenswrapper[4751]: I0227 17:42:00.185879 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536902-ljzg6" Feb 27 17:42:00 crc kubenswrapper[4751]: I0227 17:42:00.194694 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536902-ljzg6"] Feb 27 17:42:00 crc kubenswrapper[4751]: I0227 17:42:00.260362 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pw6ff\" (UniqueName: \"kubernetes.io/projected/b0a39a0b-091c-4aa0-b6e3-400c70578537-kube-api-access-pw6ff\") pod \"auto-csr-approver-29536902-ljzg6\" (UID: \"b0a39a0b-091c-4aa0-b6e3-400c70578537\") " pod="openshift-infra/auto-csr-approver-29536902-ljzg6" Feb 27 17:42:00 crc kubenswrapper[4751]: I0227 17:42:00.362094 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pw6ff\" (UniqueName: \"kubernetes.io/projected/b0a39a0b-091c-4aa0-b6e3-400c70578537-kube-api-access-pw6ff\") pod \"auto-csr-approver-29536902-ljzg6\" (UID: \"b0a39a0b-091c-4aa0-b6e3-400c70578537\") " pod="openshift-infra/auto-csr-approver-29536902-ljzg6" Feb 27 17:42:00 crc kubenswrapper[4751]: I0227 17:42:00.382379 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pw6ff\" (UniqueName: \"kubernetes.io/projected/b0a39a0b-091c-4aa0-b6e3-400c70578537-kube-api-access-pw6ff\") pod \"auto-csr-approver-29536902-ljzg6\" (UID: \"b0a39a0b-091c-4aa0-b6e3-400c70578537\") " pod="openshift-infra/auto-csr-approver-29536902-ljzg6" Feb 27 17:42:00 crc kubenswrapper[4751]: I0227 17:42:00.521339 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536902-ljzg6" Feb 27 17:42:00 crc kubenswrapper[4751]: I0227 17:42:00.899042 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536902-ljzg6"] Feb 27 17:42:01 crc kubenswrapper[4751]: I0227 17:42:01.222460 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536902-ljzg6" event={"ID":"b0a39a0b-091c-4aa0-b6e3-400c70578537","Type":"ContainerStarted","Data":"9e29bd082a06c21cdea8a9108b9619e64f025a5158fb8db2e4ea1edbc6d5da4f"} Feb 27 17:42:02 crc kubenswrapper[4751]: E0227 17:42:02.090278 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)" image="registry.redhat.io/openshift4/ose-cli:latest" Feb 27 17:42:02 crc kubenswrapper[4751]: E0227 17:42:02.090495 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 17:42:02 crc kubenswrapper[4751]: container &Container{Name:oc,Image:registry.redhat.io/openshift4/ose-cli:latest,Command:[/bin/bash -c oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve Feb 27 17:42:02 crc kubenswrapper[4751]: 
],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pw6ff,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod auto-csr-approver-29536902-ljzg6_openshift-infra(b0a39a0b-091c-4aa0-b6e3-400c70578537): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error) Feb 27 17:42:02 crc kubenswrapper[4751]: > logger="UnhandledError" Feb 27 17:42:02 crc kubenswrapper[4751]: E0227 17:42:02.091780 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)\"" pod="openshift-infra/auto-csr-approver-29536902-ljzg6" podUID="b0a39a0b-091c-4aa0-b6e3-400c70578537" Feb 27 17:42:02 crc kubenswrapper[4751]: E0227 17:42:02.232369 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536902-ljzg6" podUID="b0a39a0b-091c-4aa0-b6e3-400c70578537" Feb 27 17:42:06 crc kubenswrapper[4751]: E0227 17:42:06.523551 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:42:07 crc kubenswrapper[4751]: I0227 17:42:07.521345 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:42:07 crc kubenswrapper[4751]: E0227 17:42:07.521721 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:42:13 crc kubenswrapper[4751]: E0227 17:42:13.400103 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/certified-operator-index@sha256=625372062485d8ed1e4e84c388a7d036cb39c1b93d8c56dd3418fce0c028b62b/signature-2: status 500 
(Internal Server Error)" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Feb 27 17:42:13 crc kubenswrapper[4751]: E0227 17:42:13.403018 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bdt42,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-b7swh_openshift-marketplace(6fa2b8c3-b236-45df-aecb-d5abdb1d549f): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/certified-operator-index@sha256=625372062485d8ed1e4e84c388a7d036cb39c1b93d8c56dd3418fce0c028b62b/signature-2: status 500 (Internal Server Error)" logger="UnhandledError" Feb 27 17:42:13 crc kubenswrapper[4751]: E0227 17:42:13.404538 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/certified-operator-index@sha256=625372062485d8ed1e4e84c388a7d036cb39c1b93d8c56dd3418fce0c028b62b/signature-2: status 500 (Internal Server Error)\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:42:15 crc kubenswrapper[4751]: E0227 17:42:15.466832 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)" image="registry.redhat.io/openshift4/ose-cli:latest" Feb 27 17:42:15 crc kubenswrapper[4751]: E0227 17:42:15.467455 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 17:42:15 crc kubenswrapper[4751]: container &Container{Name:oc,Image:registry.redhat.io/openshift4/ose-cli:latest,Command:[/bin/bash -c oc get csr -o go-template='{{range 
.items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve Feb 27 17:42:15 crc kubenswrapper[4751]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pw6ff,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod auto-csr-approver-29536902-ljzg6_openshift-infra(b0a39a0b-091c-4aa0-b6e3-400c70578537): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error) Feb 27 17:42:15 crc kubenswrapper[4751]: > logger="UnhandledError" Feb 27 17:42:15 crc kubenswrapper[4751]: E0227 17:42:15.468669 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)\"" pod="openshift-infra/auto-csr-approver-29536902-ljzg6" podUID="b0a39a0b-091c-4aa0-b6e3-400c70578537" Feb 27 17:42:18 crc kubenswrapper[4751]: I0227 17:42:18.528598 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:42:18 crc kubenswrapper[4751]: E0227 17:42:18.529841 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:42:19 crc kubenswrapper[4751]: E0227 17:42:19.523104 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:42:25 crc kubenswrapper[4751]: E0227 17:42:25.524634 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:42:28 crc kubenswrapper[4751]: E0227 17:42:28.532383 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image 
\\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536902-ljzg6" podUID="b0a39a0b-091c-4aa0-b6e3-400c70578537" Feb 27 17:42:30 crc kubenswrapper[4751]: E0227 17:42:30.523094 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:42:31 crc kubenswrapper[4751]: I0227 17:42:31.520488 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:42:31 crc kubenswrapper[4751]: E0227 17:42:31.521293 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:42:40 crc kubenswrapper[4751]: E0227 17:42:40.523783 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:42:41 crc kubenswrapper[4751]: E0227 17:42:41.523080 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:42:41 crc kubenswrapper[4751]: I0227 17:42:41.607822 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536902-ljzg6" event={"ID":"b0a39a0b-091c-4aa0-b6e3-400c70578537","Type":"ContainerStarted","Data":"f57fa567b1d9956af2b58c3c1617952b20c3b9d7afadb082e9f693c70f168fa2"} Feb 27 17:42:42 crc kubenswrapper[4751]: I0227 17:42:42.521096 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:42:42 crc kubenswrapper[4751]: E0227 17:42:42.521438 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:42:42 crc kubenswrapper[4751]: I0227 17:42:42.620426 4751 generic.go:334] "Generic (PLEG): container finished" podID="b0a39a0b-091c-4aa0-b6e3-400c70578537" containerID="f57fa567b1d9956af2b58c3c1617952b20c3b9d7afadb082e9f693c70f168fa2" exitCode=0 Feb 27 17:42:42 crc kubenswrapper[4751]: I0227 17:42:42.620475 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536902-ljzg6" event={"ID":"b0a39a0b-091c-4aa0-b6e3-400c70578537","Type":"ContainerDied","Data":"f57fa567b1d9956af2b58c3c1617952b20c3b9d7afadb082e9f693c70f168fa2"} Feb 27 17:42:44 crc 
kubenswrapper[4751]: I0227 17:42:44.016663 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536902-ljzg6" Feb 27 17:42:44 crc kubenswrapper[4751]: I0227 17:42:44.094250 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pw6ff\" (UniqueName: \"kubernetes.io/projected/b0a39a0b-091c-4aa0-b6e3-400c70578537-kube-api-access-pw6ff\") pod \"b0a39a0b-091c-4aa0-b6e3-400c70578537\" (UID: \"b0a39a0b-091c-4aa0-b6e3-400c70578537\") " Feb 27 17:42:44 crc kubenswrapper[4751]: I0227 17:42:44.103680 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0a39a0b-091c-4aa0-b6e3-400c70578537-kube-api-access-pw6ff" (OuterVolumeSpecName: "kube-api-access-pw6ff") pod "b0a39a0b-091c-4aa0-b6e3-400c70578537" (UID: "b0a39a0b-091c-4aa0-b6e3-400c70578537"). InnerVolumeSpecName "kube-api-access-pw6ff". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:42:44 crc kubenswrapper[4751]: I0227 17:42:44.195912 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pw6ff\" (UniqueName: \"kubernetes.io/projected/b0a39a0b-091c-4aa0-b6e3-400c70578537-kube-api-access-pw6ff\") on node \"crc\" DevicePath \"\"" Feb 27 17:42:44 crc kubenswrapper[4751]: I0227 17:42:44.642362 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536902-ljzg6" event={"ID":"b0a39a0b-091c-4aa0-b6e3-400c70578537","Type":"ContainerDied","Data":"9e29bd082a06c21cdea8a9108b9619e64f025a5158fb8db2e4ea1edbc6d5da4f"} Feb 27 17:42:44 crc kubenswrapper[4751]: I0227 17:42:44.642432 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9e29bd082a06c21cdea8a9108b9619e64f025a5158fb8db2e4ea1edbc6d5da4f" Feb 27 17:42:44 crc kubenswrapper[4751]: I0227 17:42:44.642477 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536902-ljzg6" Feb 27 17:42:44 crc kubenswrapper[4751]: I0227 17:42:44.942226 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536894-rrfr6"] Feb 27 17:42:44 crc kubenswrapper[4751]: I0227 17:42:44.947813 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536894-rrfr6"] Feb 27 17:42:46 crc kubenswrapper[4751]: I0227 17:42:46.538999 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4cd0facd-05cb-4d38-bc5e-338bbb9a468c" path="/var/lib/kubelet/pods/4cd0facd-05cb-4d38-bc5e-338bbb9a468c/volumes" Feb 27 17:42:51 crc kubenswrapper[4751]: I0227 17:42:51.762996 4751 scope.go:117] "RemoveContainer" containerID="35b08e8072477e37f8056c94e293abc90c6fee8fec60e6980c833ac4b8fd73d3" Feb 27 17:42:53 crc kubenswrapper[4751]: E0227 17:42:53.524564 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:42:54 crc kubenswrapper[4751]: E0227 17:42:54.523502 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:42:56 crc kubenswrapper[4751]: I0227 17:42:56.521136 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:42:56 crc kubenswrapper[4751]: E0227 17:42:56.521546 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:43:05 crc kubenswrapper[4751]: E0227 17:43:05.523308 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:43:07 crc kubenswrapper[4751]: E0227 17:43:07.523111 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:43:08 crc kubenswrapper[4751]: I0227 17:43:08.528486 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:43:08 crc kubenswrapper[4751]: E0227 17:43:08.528946 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:43:16 crc kubenswrapper[4751]: I0227 17:43:16.780022 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-8vpb7"] Feb 27 17:43:16 crc kubenswrapper[4751]: I0227 17:43:16.789813 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-8vpb7"] Feb 27 17:43:16 crc kubenswrapper[4751]: I0227 17:43:16.967062 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-v8hld"] Feb 27 17:43:16 crc kubenswrapper[4751]: E0227 17:43:16.967527 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0a39a0b-091c-4aa0-b6e3-400c70578537" containerName="oc" Feb 27 17:43:16 crc kubenswrapper[4751]: I0227 17:43:16.967553 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0a39a0b-091c-4aa0-b6e3-400c70578537" containerName="oc" Feb 27 17:43:16 crc kubenswrapper[4751]: I0227 17:43:16.967787 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0a39a0b-091c-4aa0-b6e3-400c70578537" containerName="oc" Feb 27 17:43:16 crc kubenswrapper[4751]: I0227 17:43:16.968526 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-v8hld" Feb 27 17:43:16 crc kubenswrapper[4751]: I0227 17:43:16.970772 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Feb 27 17:43:16 crc kubenswrapper[4751]: I0227 17:43:16.970979 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Feb 27 17:43:16 crc kubenswrapper[4751]: I0227 17:43:16.973165 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Feb 27 17:43:16 crc kubenswrapper[4751]: I0227 17:43:16.973345 4751 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-54vq6" Feb 27 17:43:16 crc kubenswrapper[4751]: I0227 17:43:16.990013 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-v8hld"] Feb 27 17:43:17 crc kubenswrapper[4751]: I0227 17:43:17.075584 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/c48e23af-ac41-4fa6-8725-4a78836f2fb3-node-mnt\") pod \"crc-storage-crc-v8hld\" (UID: \"c48e23af-ac41-4fa6-8725-4a78836f2fb3\") " pod="crc-storage/crc-storage-crc-v8hld" Feb 27 17:43:17 crc kubenswrapper[4751]: I0227 17:43:17.075738 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gwdk\" (UniqueName: \"kubernetes.io/projected/c48e23af-ac41-4fa6-8725-4a78836f2fb3-kube-api-access-6gwdk\") pod \"crc-storage-crc-v8hld\" (UID: \"c48e23af-ac41-4fa6-8725-4a78836f2fb3\") " pod="crc-storage/crc-storage-crc-v8hld" Feb 27 17:43:17 crc kubenswrapper[4751]: I0227 17:43:17.075786 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/c48e23af-ac41-4fa6-8725-4a78836f2fb3-crc-storage\") pod \"crc-storage-crc-v8hld\" (UID: \"c48e23af-ac41-4fa6-8725-4a78836f2fb3\") " pod="crc-storage/crc-storage-crc-v8hld" Feb 27 17:43:17 crc kubenswrapper[4751]: I0227 17:43:17.178319 4751 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/c48e23af-ac41-4fa6-8725-4a78836f2fb3-node-mnt\") pod \"crc-storage-crc-v8hld\" (UID: \"c48e23af-ac41-4fa6-8725-4a78836f2fb3\") " pod="crc-storage/crc-storage-crc-v8hld" Feb 27 17:43:17 crc kubenswrapper[4751]: I0227 17:43:17.178438 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gwdk\" (UniqueName: \"kubernetes.io/projected/c48e23af-ac41-4fa6-8725-4a78836f2fb3-kube-api-access-6gwdk\") pod \"crc-storage-crc-v8hld\" (UID: \"c48e23af-ac41-4fa6-8725-4a78836f2fb3\") " pod="crc-storage/crc-storage-crc-v8hld" Feb 27 17:43:17 crc kubenswrapper[4751]: I0227 17:43:17.178481 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/c48e23af-ac41-4fa6-8725-4a78836f2fb3-crc-storage\") pod \"crc-storage-crc-v8hld\" (UID: \"c48e23af-ac41-4fa6-8725-4a78836f2fb3\") " pod="crc-storage/crc-storage-crc-v8hld" Feb 27 17:43:17 crc kubenswrapper[4751]: I0227 17:43:17.178563 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/c48e23af-ac41-4fa6-8725-4a78836f2fb3-node-mnt\") pod \"crc-storage-crc-v8hld\" (UID: \"c48e23af-ac41-4fa6-8725-4a78836f2fb3\") " pod="crc-storage/crc-storage-crc-v8hld" Feb 27 17:43:17 crc kubenswrapper[4751]: I0227 17:43:17.180067 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/c48e23af-ac41-4fa6-8725-4a78836f2fb3-crc-storage\") pod \"crc-storage-crc-v8hld\" (UID: \"c48e23af-ac41-4fa6-8725-4a78836f2fb3\") " pod="crc-storage/crc-storage-crc-v8hld" Feb 27 17:43:17 crc kubenswrapper[4751]: I0227 17:43:17.199614 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gwdk\" (UniqueName: \"kubernetes.io/projected/c48e23af-ac41-4fa6-8725-4a78836f2fb3-kube-api-access-6gwdk\") pod \"crc-storage-crc-v8hld\" (UID: \"c48e23af-ac41-4fa6-8725-4a78836f2fb3\") " pod="crc-storage/crc-storage-crc-v8hld" Feb 27 17:43:17 crc kubenswrapper[4751]: I0227 17:43:17.294030 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-v8hld" Feb 27 17:43:17 crc kubenswrapper[4751]: I0227 17:43:17.864322 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-v8hld"] Feb 27 17:43:17 crc kubenswrapper[4751]: I0227 17:43:17.960703 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-v8hld" event={"ID":"c48e23af-ac41-4fa6-8725-4a78836f2fb3","Type":"ContainerStarted","Data":"75c839d05dc383b51eb6ab7d899b3b0abebdaa66535e0dbec03bd04d8ef282db"} Feb 27 17:43:18 crc kubenswrapper[4751]: E0227 17:43:18.534380 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:43:18 crc kubenswrapper[4751]: I0227 17:43:18.537561 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54ea9e35-c94b-40f3-b454-55dca7de0349" path="/var/lib/kubelet/pods/54ea9e35-c94b-40f3-b454-55dca7de0349/volumes" Feb 27 17:43:18 crc kubenswrapper[4751]: I0227 17:43:18.971958 4751 generic.go:334] "Generic (PLEG): container finished" podID="c48e23af-ac41-4fa6-8725-4a78836f2fb3" containerID="f70d73883f0010cfa2c91fa8333825ba74e886ac362593b2c70b10ecac4ba886" exitCode=0 Feb 27 17:43:18 crc kubenswrapper[4751]: I0227 17:43:18.972020 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-v8hld" event={"ID":"c48e23af-ac41-4fa6-8725-4a78836f2fb3","Type":"ContainerDied","Data":"f70d73883f0010cfa2c91fa8333825ba74e886ac362593b2c70b10ecac4ba886"} Feb 27 17:43:20 crc kubenswrapper[4751]: I0227 17:43:20.356858 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-v8hld" Feb 27 17:43:20 crc kubenswrapper[4751]: I0227 17:43:20.436586 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6gwdk\" (UniqueName: \"kubernetes.io/projected/c48e23af-ac41-4fa6-8725-4a78836f2fb3-kube-api-access-6gwdk\") pod \"c48e23af-ac41-4fa6-8725-4a78836f2fb3\" (UID: \"c48e23af-ac41-4fa6-8725-4a78836f2fb3\") " Feb 27 17:43:20 crc kubenswrapper[4751]: I0227 17:43:20.436684 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/c48e23af-ac41-4fa6-8725-4a78836f2fb3-node-mnt\") pod \"c48e23af-ac41-4fa6-8725-4a78836f2fb3\" (UID: \"c48e23af-ac41-4fa6-8725-4a78836f2fb3\") " Feb 27 17:43:20 crc kubenswrapper[4751]: I0227 17:43:20.436757 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/c48e23af-ac41-4fa6-8725-4a78836f2fb3-crc-storage\") pod \"c48e23af-ac41-4fa6-8725-4a78836f2fb3\" (UID: \"c48e23af-ac41-4fa6-8725-4a78836f2fb3\") " Feb 27 17:43:20 crc kubenswrapper[4751]: I0227 17:43:20.437511 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c48e23af-ac41-4fa6-8725-4a78836f2fb3-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "c48e23af-ac41-4fa6-8725-4a78836f2fb3" (UID: "c48e23af-ac41-4fa6-8725-4a78836f2fb3"). InnerVolumeSpecName "node-mnt". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 17:43:20 crc kubenswrapper[4751]: I0227 17:43:20.444624 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c48e23af-ac41-4fa6-8725-4a78836f2fb3-kube-api-access-6gwdk" (OuterVolumeSpecName: "kube-api-access-6gwdk") pod "c48e23af-ac41-4fa6-8725-4a78836f2fb3" (UID: "c48e23af-ac41-4fa6-8725-4a78836f2fb3"). InnerVolumeSpecName "kube-api-access-6gwdk". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:43:20 crc kubenswrapper[4751]: I0227 17:43:20.467229 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c48e23af-ac41-4fa6-8725-4a78836f2fb3-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "c48e23af-ac41-4fa6-8725-4a78836f2fb3" (UID: "c48e23af-ac41-4fa6-8725-4a78836f2fb3"). InnerVolumeSpecName "crc-storage". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 17:43:20 crc kubenswrapper[4751]: I0227 17:43:20.538601 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6gwdk\" (UniqueName: \"kubernetes.io/projected/c48e23af-ac41-4fa6-8725-4a78836f2fb3-kube-api-access-6gwdk\") on node \"crc\" DevicePath \"\"" Feb 27 17:43:20 crc kubenswrapper[4751]: I0227 17:43:20.538662 4751 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/c48e23af-ac41-4fa6-8725-4a78836f2fb3-node-mnt\") on node \"crc\" DevicePath \"\"" Feb 27 17:43:20 crc kubenswrapper[4751]: I0227 17:43:20.538677 4751 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/c48e23af-ac41-4fa6-8725-4a78836f2fb3-crc-storage\") on node \"crc\" DevicePath \"\"" Feb 27 17:43:20 crc kubenswrapper[4751]: I0227 17:43:20.998040 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-v8hld" event={"ID":"c48e23af-ac41-4fa6-8725-4a78836f2fb3","Type":"ContainerDied","Data":"75c839d05dc383b51eb6ab7d899b3b0abebdaa66535e0dbec03bd04d8ef282db"} Feb 27 17:43:20 crc kubenswrapper[4751]: I0227 17:43:20.998115 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="75c839d05dc383b51eb6ab7d899b3b0abebdaa66535e0dbec03bd04d8ef282db" Feb 27 17:43:20 crc kubenswrapper[4751]: I0227 17:43:20.998198 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-v8hld" Feb 27 17:43:21 crc kubenswrapper[4751]: E0227 17:43:21.523577 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:43:22 crc kubenswrapper[4751]: I0227 17:43:22.867344 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-v8hld"] Feb 27 17:43:22 crc kubenswrapper[4751]: I0227 17:43:22.877204 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-v8hld"] Feb 27 17:43:23 crc kubenswrapper[4751]: I0227 17:43:23.034320 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-m7n9h"] Feb 27 17:43:23 crc kubenswrapper[4751]: E0227 17:43:23.034685 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c48e23af-ac41-4fa6-8725-4a78836f2fb3" containerName="storage" Feb 27 17:43:23 crc kubenswrapper[4751]: I0227 17:43:23.034697 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="c48e23af-ac41-4fa6-8725-4a78836f2fb3" containerName="storage" Feb 27 17:43:23 crc kubenswrapper[4751]: I0227 17:43:23.034822 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="c48e23af-ac41-4fa6-8725-4a78836f2fb3" containerName="storage" Feb 27 17:43:23 crc kubenswrapper[4751]: I0227 17:43:23.035336 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-m7n9h" Feb 27 17:43:23 crc kubenswrapper[4751]: I0227 17:43:23.038158 4751 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-54vq6" Feb 27 17:43:23 crc kubenswrapper[4751]: I0227 17:43:23.038840 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Feb 27 17:43:23 crc kubenswrapper[4751]: I0227 17:43:23.040234 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Feb 27 17:43:23 crc kubenswrapper[4751]: I0227 17:43:23.041648 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Feb 27 17:43:23 crc kubenswrapper[4751]: I0227 17:43:23.055203 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-m7n9h"] Feb 27 17:43:23 crc kubenswrapper[4751]: I0227 17:43:23.179216 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/c472eefc-fedf-4ec9-b5d1-ec4b643f620e-node-mnt\") pod \"crc-storage-crc-m7n9h\" (UID: \"c472eefc-fedf-4ec9-b5d1-ec4b643f620e\") " pod="crc-storage/crc-storage-crc-m7n9h" Feb 27 17:43:23 crc kubenswrapper[4751]: I0227 17:43:23.179292 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/c472eefc-fedf-4ec9-b5d1-ec4b643f620e-crc-storage\") pod \"crc-storage-crc-m7n9h\" (UID: \"c472eefc-fedf-4ec9-b5d1-ec4b643f620e\") " pod="crc-storage/crc-storage-crc-m7n9h" Feb 27 17:43:23 crc kubenswrapper[4751]: I0227 17:43:23.180149 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84zmz\" (UniqueName: 
\"kubernetes.io/projected/c472eefc-fedf-4ec9-b5d1-ec4b643f620e-kube-api-access-84zmz\") pod \"crc-storage-crc-m7n9h\" (UID: \"c472eefc-fedf-4ec9-b5d1-ec4b643f620e\") " pod="crc-storage/crc-storage-crc-m7n9h" Feb 27 17:43:23 crc kubenswrapper[4751]: I0227 17:43:23.281708 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/c472eefc-fedf-4ec9-b5d1-ec4b643f620e-node-mnt\") pod \"crc-storage-crc-m7n9h\" (UID: \"c472eefc-fedf-4ec9-b5d1-ec4b643f620e\") " pod="crc-storage/crc-storage-crc-m7n9h" Feb 27 17:43:23 crc kubenswrapper[4751]: I0227 17:43:23.281792 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/c472eefc-fedf-4ec9-b5d1-ec4b643f620e-crc-storage\") pod \"crc-storage-crc-m7n9h\" (UID: \"c472eefc-fedf-4ec9-b5d1-ec4b643f620e\") " pod="crc-storage/crc-storage-crc-m7n9h" Feb 27 17:43:23 crc kubenswrapper[4751]: I0227 17:43:23.281915 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84zmz\" (UniqueName: \"kubernetes.io/projected/c472eefc-fedf-4ec9-b5d1-ec4b643f620e-kube-api-access-84zmz\") pod \"crc-storage-crc-m7n9h\" (UID: \"c472eefc-fedf-4ec9-b5d1-ec4b643f620e\") " pod="crc-storage/crc-storage-crc-m7n9h" Feb 27 17:43:23 crc kubenswrapper[4751]: I0227 17:43:23.283985 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/c472eefc-fedf-4ec9-b5d1-ec4b643f620e-node-mnt\") pod \"crc-storage-crc-m7n9h\" (UID: \"c472eefc-fedf-4ec9-b5d1-ec4b643f620e\") " pod="crc-storage/crc-storage-crc-m7n9h" Feb 27 17:43:23 crc kubenswrapper[4751]: I0227 17:43:23.285387 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/c472eefc-fedf-4ec9-b5d1-ec4b643f620e-crc-storage\") pod \"crc-storage-crc-m7n9h\" (UID: \"c472eefc-fedf-4ec9-b5d1-ec4b643f620e\") " pod="crc-storage/crc-storage-crc-m7n9h" Feb 27 17:43:23 crc kubenswrapper[4751]: I0227 17:43:23.318375 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84zmz\" (UniqueName: \"kubernetes.io/projected/c472eefc-fedf-4ec9-b5d1-ec4b643f620e-kube-api-access-84zmz\") pod \"crc-storage-crc-m7n9h\" (UID: \"c472eefc-fedf-4ec9-b5d1-ec4b643f620e\") " pod="crc-storage/crc-storage-crc-m7n9h" Feb 27 17:43:23 crc kubenswrapper[4751]: I0227 17:43:23.363476 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-m7n9h" Feb 27 17:43:23 crc kubenswrapper[4751]: I0227 17:43:23.858185 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-m7n9h"] Feb 27 17:43:24 crc kubenswrapper[4751]: I0227 17:43:24.032855 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-m7n9h" event={"ID":"c472eefc-fedf-4ec9-b5d1-ec4b643f620e","Type":"ContainerStarted","Data":"f3f4e6c3dbd5e28167625d2d897a62d96173906d88d118010d86545bb43e05e3"} Feb 27 17:43:24 crc kubenswrapper[4751]: I0227 17:43:24.521324 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:43:24 crc kubenswrapper[4751]: E0227 17:43:24.521819 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:43:24 crc kubenswrapper[4751]: I0227 17:43:24.537742 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c48e23af-ac41-4fa6-8725-4a78836f2fb3" path="/var/lib/kubelet/pods/c48e23af-ac41-4fa6-8725-4a78836f2fb3/volumes" Feb 27 17:43:25 crc kubenswrapper[4751]: I0227 17:43:25.043458 4751 generic.go:334] "Generic (PLEG): container finished" podID="c472eefc-fedf-4ec9-b5d1-ec4b643f620e" containerID="7a0f53db116a5277bda90d1e28d16707e825e29f95d334c66f780963479a2d22" exitCode=0 Feb 27 17:43:25 crc kubenswrapper[4751]: I0227 17:43:25.043546 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-m7n9h" event={"ID":"c472eefc-fedf-4ec9-b5d1-ec4b643f620e","Type":"ContainerDied","Data":"7a0f53db116a5277bda90d1e28d16707e825e29f95d334c66f780963479a2d22"} Feb 27 17:43:26 crc kubenswrapper[4751]: I0227 17:43:26.448717 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-m7n9h" Feb 27 17:43:26 crc kubenswrapper[4751]: I0227 17:43:26.633241 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/c472eefc-fedf-4ec9-b5d1-ec4b643f620e-crc-storage\") pod \"c472eefc-fedf-4ec9-b5d1-ec4b643f620e\" (UID: \"c472eefc-fedf-4ec9-b5d1-ec4b643f620e\") " Feb 27 17:43:26 crc kubenswrapper[4751]: I0227 17:43:26.633328 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-84zmz\" (UniqueName: \"kubernetes.io/projected/c472eefc-fedf-4ec9-b5d1-ec4b643f620e-kube-api-access-84zmz\") pod \"c472eefc-fedf-4ec9-b5d1-ec4b643f620e\" (UID: \"c472eefc-fedf-4ec9-b5d1-ec4b643f620e\") " Feb 27 17:43:26 crc kubenswrapper[4751]: I0227 17:43:26.633679 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/c472eefc-fedf-4ec9-b5d1-ec4b643f620e-node-mnt\") pod \"c472eefc-fedf-4ec9-b5d1-ec4b643f620e\" (UID: \"c472eefc-fedf-4ec9-b5d1-ec4b643f620e\") " Feb 27 17:43:26 crc kubenswrapper[4751]: I0227 17:43:26.633822 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c472eefc-fedf-4ec9-b5d1-ec4b643f620e-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "c472eefc-fedf-4ec9-b5d1-ec4b643f620e" (UID: "c472eefc-fedf-4ec9-b5d1-ec4b643f620e"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 27 17:43:26 crc kubenswrapper[4751]: I0227 17:43:26.634178 4751 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/c472eefc-fedf-4ec9-b5d1-ec4b643f620e-node-mnt\") on node \"crc\" DevicePath \"\"" Feb 27 17:43:26 crc kubenswrapper[4751]: I0227 17:43:26.642431 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c472eefc-fedf-4ec9-b5d1-ec4b643f620e-kube-api-access-84zmz" (OuterVolumeSpecName: "kube-api-access-84zmz") pod "c472eefc-fedf-4ec9-b5d1-ec4b643f620e" (UID: "c472eefc-fedf-4ec9-b5d1-ec4b643f620e"). InnerVolumeSpecName "kube-api-access-84zmz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:43:26 crc kubenswrapper[4751]: I0227 17:43:26.665674 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c472eefc-fedf-4ec9-b5d1-ec4b643f620e-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "c472eefc-fedf-4ec9-b5d1-ec4b643f620e" (UID: "c472eefc-fedf-4ec9-b5d1-ec4b643f620e"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 17:43:26 crc kubenswrapper[4751]: I0227 17:43:26.735798 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-84zmz\" (UniqueName: \"kubernetes.io/projected/c472eefc-fedf-4ec9-b5d1-ec4b643f620e-kube-api-access-84zmz\") on node \"crc\" DevicePath \"\"" Feb 27 17:43:26 crc kubenswrapper[4751]: I0227 17:43:26.735841 4751 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/c472eefc-fedf-4ec9-b5d1-ec4b643f620e-crc-storage\") on node \"crc\" DevicePath \"\"" Feb 27 17:43:27 crc kubenswrapper[4751]: I0227 17:43:27.068382 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-m7n9h" event={"ID":"c472eefc-fedf-4ec9-b5d1-ec4b643f620e","Type":"ContainerDied","Data":"f3f4e6c3dbd5e28167625d2d897a62d96173906d88d118010d86545bb43e05e3"} Feb 27 17:43:27 crc kubenswrapper[4751]: I0227 17:43:27.068441 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f3f4e6c3dbd5e28167625d2d897a62d96173906d88d118010d86545bb43e05e3" Feb 27 17:43:27 crc kubenswrapper[4751]: I0227 17:43:27.068495 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-m7n9h" Feb 27 17:43:29 crc kubenswrapper[4751]: E0227 17:43:29.526042 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:43:34 crc kubenswrapper[4751]: E0227 17:43:34.524736 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:43:39 crc kubenswrapper[4751]: I0227 17:43:39.521409 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:43:39 crc kubenswrapper[4751]: E0227 17:43:39.522340 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:43:42 crc kubenswrapper[4751]: E0227 17:43:42.135378 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/certified-operator-index@sha256=625372062485d8ed1e4e84c388a7d036cb39c1b93d8c56dd3418fce0c028b62b/signature-2: status 500 (Internal Server Error)" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Feb 27 17:43:42 crc kubenswrapper[4751]: E0227 17:43:42.135795 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs 
--catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bdt42,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-b7swh_openshift-marketplace(6fa2b8c3-b236-45df-aecb-d5abdb1d549f): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/certified-operator-index@sha256=625372062485d8ed1e4e84c388a7d036cb39c1b93d8c56dd3418fce0c028b62b/signature-2: status 500 (Internal Server Error)" logger="UnhandledError" Feb 27 17:43:42 crc kubenswrapper[4751]: E0227 17:43:42.136937 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/certified-operator-index@sha256=625372062485d8ed1e4e84c388a7d036cb39c1b93d8c56dd3418fce0c028b62b/signature-2: status 500 (Internal Server Error)\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:43:47 crc kubenswrapper[4751]: E0227 17:43:47.523851 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:43:51 crc kubenswrapper[4751]: I0227 17:43:51.520217 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:43:51 crc kubenswrapper[4751]: E0227 17:43:51.521014 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:43:51 crc kubenswrapper[4751]: I0227 17:43:51.850489 4751 scope.go:117] "RemoveContainer" 
containerID="70b4dd0b4546b7bb0db5b503b6ae71ee465d5d71f3169e370364edde9173b814" Feb 27 17:43:52 crc kubenswrapper[4751]: E0227 17:43:52.522330 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:44:00 crc kubenswrapper[4751]: I0227 17:44:00.149054 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536904-bp2m9"] Feb 27 17:44:00 crc kubenswrapper[4751]: E0227 17:44:00.152139 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c472eefc-fedf-4ec9-b5d1-ec4b643f620e" containerName="storage" Feb 27 17:44:00 crc kubenswrapper[4751]: I0227 17:44:00.152278 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="c472eefc-fedf-4ec9-b5d1-ec4b643f620e" containerName="storage" Feb 27 17:44:00 crc kubenswrapper[4751]: I0227 17:44:00.152677 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="c472eefc-fedf-4ec9-b5d1-ec4b643f620e" containerName="storage" Feb 27 17:44:00 crc kubenswrapper[4751]: I0227 17:44:00.153520 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536904-bp2m9" Feb 27 17:44:00 crc kubenswrapper[4751]: I0227 17:44:00.159310 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536904-bp2m9"] Feb 27 17:44:00 crc kubenswrapper[4751]: I0227 17:44:00.292779 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2vmj\" (UniqueName: \"kubernetes.io/projected/d1e31907-3659-412d-8af8-69bd2998180e-kube-api-access-n2vmj\") pod \"auto-csr-approver-29536904-bp2m9\" (UID: \"d1e31907-3659-412d-8af8-69bd2998180e\") " pod="openshift-infra/auto-csr-approver-29536904-bp2m9" Feb 27 17:44:00 crc kubenswrapper[4751]: I0227 17:44:00.394236 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2vmj\" (UniqueName: \"kubernetes.io/projected/d1e31907-3659-412d-8af8-69bd2998180e-kube-api-access-n2vmj\") pod \"auto-csr-approver-29536904-bp2m9\" (UID: \"d1e31907-3659-412d-8af8-69bd2998180e\") " pod="openshift-infra/auto-csr-approver-29536904-bp2m9" Feb 27 17:44:00 crc kubenswrapper[4751]: I0227 17:44:00.427231 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2vmj\" (UniqueName: \"kubernetes.io/projected/d1e31907-3659-412d-8af8-69bd2998180e-kube-api-access-n2vmj\") pod \"auto-csr-approver-29536904-bp2m9\" (UID: \"d1e31907-3659-412d-8af8-69bd2998180e\") " pod="openshift-infra/auto-csr-approver-29536904-bp2m9" Feb 27 17:44:00 crc kubenswrapper[4751]: I0227 17:44:00.482131 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536904-bp2m9" Feb 27 17:44:01 crc kubenswrapper[4751]: I0227 17:44:01.000068 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536904-bp2m9"] Feb 27 17:44:01 crc kubenswrapper[4751]: I0227 17:44:01.386386 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536904-bp2m9" event={"ID":"d1e31907-3659-412d-8af8-69bd2998180e","Type":"ContainerStarted","Data":"8742be293148c46875dfca7dcf147010baf628ac9f473e55402cb50a45483dff"} Feb 27 17:44:02 crc kubenswrapper[4751]: E0227 17:44:02.022834 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)" image="registry.redhat.io/openshift4/ose-cli:latest" Feb 27 17:44:02 crc kubenswrapper[4751]: E0227 17:44:02.022994 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 17:44:02 crc kubenswrapper[4751]: container &Container{Name:oc,Image:registry.redhat.io/openshift4/ose-cli:latest,Command:[/bin/bash -c oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve Feb 27 17:44:02 crc kubenswrapper[4751]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-n2vmj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod auto-csr-approver-29536904-bp2m9_openshift-infra(d1e31907-3659-412d-8af8-69bd2998180e): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error) Feb 27 17:44:02 crc kubenswrapper[4751]: > logger="UnhandledError" Feb 27 17:44:02 crc kubenswrapper[4751]: E0227 17:44:02.024197 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)\"" pod="openshift-infra/auto-csr-approver-29536904-bp2m9" podUID="d1e31907-3659-412d-8af8-69bd2998180e" Feb 27 17:44:02 crc kubenswrapper[4751]: E0227 17:44:02.396476 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" 
pod="openshift-infra/auto-csr-approver-29536904-bp2m9" podUID="d1e31907-3659-412d-8af8-69bd2998180e" Feb 27 17:44:02 crc kubenswrapper[4751]: E0227 17:44:02.521452 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:44:04 crc kubenswrapper[4751]: I0227 17:44:04.520590 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:44:04 crc kubenswrapper[4751]: E0227 17:44:04.521193 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:44:06 crc kubenswrapper[4751]: E0227 17:44:06.523993 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:44:13 crc kubenswrapper[4751]: E0227 17:44:13.522890 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:44:14 crc kubenswrapper[4751]: E0227 17:44:14.516180 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)" image="registry.redhat.io/openshift4/ose-cli:latest" Feb 27 17:44:14 crc kubenswrapper[4751]: E0227 17:44:14.516334 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 17:44:14 crc kubenswrapper[4751]: container &Container{Name:oc,Image:registry.redhat.io/openshift4/ose-cli:latest,Command:[/bin/bash -c oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve Feb 27 17:44:14 crc kubenswrapper[4751]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-n2vmj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} 
start failed in pod auto-csr-approver-29536904-bp2m9_openshift-infra(d1e31907-3659-412d-8af8-69bd2998180e): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error) Feb 27 17:44:14 crc kubenswrapper[4751]: > logger="UnhandledError" Feb 27 17:44:14 crc kubenswrapper[4751]: E0227 17:44:14.518099 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)\"" pod="openshift-infra/auto-csr-approver-29536904-bp2m9" podUID="d1e31907-3659-412d-8af8-69bd2998180e" Feb 27 17:44:18 crc kubenswrapper[4751]: I0227 17:44:18.527862 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:44:18 crc kubenswrapper[4751]: E0227 17:44:18.528495 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:44:21 crc kubenswrapper[4751]: E0227 17:44:21.523048 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:44:26 crc kubenswrapper[4751]: E0227 17:44:26.523436 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:44:29 crc kubenswrapper[4751]: E0227 17:44:29.524851 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536904-bp2m9" podUID="d1e31907-3659-412d-8af8-69bd2998180e" Feb 27 17:44:32 crc kubenswrapper[4751]: I0227 17:44:32.521195 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:44:32 crc kubenswrapper[4751]: E0227 17:44:32.521475 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:44:32 crc kubenswrapper[4751]: E0227 
17:44:32.522803 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:44:41 crc kubenswrapper[4751]: E0227 17:44:41.440193 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)" image="registry.redhat.io/openshift4/ose-cli:latest" Feb 27 17:44:41 crc kubenswrapper[4751]: E0227 17:44:41.440871 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 17:44:41 crc kubenswrapper[4751]: container &Container{Name:oc,Image:registry.redhat.io/openshift4/ose-cli:latest,Command:[/bin/bash -c oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve Feb 27 17:44:41 crc kubenswrapper[4751]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jlxqx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod auto-csr-approver-29536898-x7x5b_openshift-infra(bebb76fd-da0e-4c76-a3fe-1c31a40256fc): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error) Feb 27 17:44:41 crc kubenswrapper[4751]: > logger="UnhandledError" Feb 27 17:44:41 crc kubenswrapper[4751]: E0227 17:44:41.442098 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:44:42 crc kubenswrapper[4751]: E0227 17:44:42.406125 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)" image="registry.redhat.io/openshift4/ose-cli:latest" Feb 27 17:44:42 crc kubenswrapper[4751]: E0227 17:44:42.406375 
4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 17:44:42 crc kubenswrapper[4751]: container &Container{Name:oc,Image:registry.redhat.io/openshift4/ose-cli:latest,Command:[/bin/bash -c oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve Feb 27 17:44:42 crc kubenswrapper[4751]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-n2vmj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod auto-csr-approver-29536904-bp2m9_openshift-infra(d1e31907-3659-412d-8af8-69bd2998180e): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error) Feb 27 17:44:42 crc kubenswrapper[4751]: > logger="UnhandledError" Feb 27 17:44:42 crc kubenswrapper[4751]: E0227 17:44:42.407650 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)\"" pod="openshift-infra/auto-csr-approver-29536904-bp2m9" podUID="d1e31907-3659-412d-8af8-69bd2998180e" Feb 27 17:44:43 crc kubenswrapper[4751]: I0227 17:44:43.521225 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:44:43 crc kubenswrapper[4751]: E0227 17:44:43.521795 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:44:43 crc kubenswrapper[4751]: E0227 17:44:43.523464 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:44:55 crc kubenswrapper[4751]: E0227 17:44:55.524034 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" 
podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:44:56 crc kubenswrapper[4751]: I0227 17:44:56.521265 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:44:56 crc kubenswrapper[4751]: E0227 17:44:56.521695 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:44:56 crc kubenswrapper[4751]: E0227 17:44:56.523003 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536904-bp2m9" podUID="d1e31907-3659-412d-8af8-69bd2998180e" Feb 27 17:44:58 crc kubenswrapper[4751]: E0227 17:44:58.532752 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:45:00 crc kubenswrapper[4751]: I0227 17:45:00.173949 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536905-l2blb"] Feb 27 17:45:00 crc kubenswrapper[4751]: I0227 17:45:00.175472 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536905-l2blb" Feb 27 17:45:00 crc kubenswrapper[4751]: I0227 17:45:00.178089 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 27 17:45:00 crc kubenswrapper[4751]: I0227 17:45:00.178760 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 27 17:45:00 crc kubenswrapper[4751]: I0227 17:45:00.194749 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536905-l2blb"] Feb 27 17:45:00 crc kubenswrapper[4751]: I0227 17:45:00.276230 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gktrc\" (UniqueName: \"kubernetes.io/projected/32293f8e-224e-47f3-95e8-3bd97ad81d4f-kube-api-access-gktrc\") pod \"collect-profiles-29536905-l2blb\" (UID: \"32293f8e-224e-47f3-95e8-3bd97ad81d4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536905-l2blb" Feb 27 17:45:00 crc kubenswrapper[4751]: I0227 17:45:00.276431 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/32293f8e-224e-47f3-95e8-3bd97ad81d4f-secret-volume\") pod \"collect-profiles-29536905-l2blb\" (UID: \"32293f8e-224e-47f3-95e8-3bd97ad81d4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536905-l2blb" Feb 27 17:45:00 crc kubenswrapper[4751]: I0227 17:45:00.276469 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/32293f8e-224e-47f3-95e8-3bd97ad81d4f-config-volume\") pod \"collect-profiles-29536905-l2blb\" (UID: \"32293f8e-224e-47f3-95e8-3bd97ad81d4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536905-l2blb" Feb 27 17:45:00 crc kubenswrapper[4751]: I0227 17:45:00.377532 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gktrc\" (UniqueName: \"kubernetes.io/projected/32293f8e-224e-47f3-95e8-3bd97ad81d4f-kube-api-access-gktrc\") pod \"collect-profiles-29536905-l2blb\" (UID: \"32293f8e-224e-47f3-95e8-3bd97ad81d4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536905-l2blb" Feb 27 17:45:00 crc kubenswrapper[4751]: I0227 17:45:00.377604 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/32293f8e-224e-47f3-95e8-3bd97ad81d4f-secret-volume\") pod \"collect-profiles-29536905-l2blb\" (UID: \"32293f8e-224e-47f3-95e8-3bd97ad81d4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536905-l2blb" Feb 27 17:45:00 crc kubenswrapper[4751]: I0227 17:45:00.377624 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/32293f8e-224e-47f3-95e8-3bd97ad81d4f-config-volume\") pod \"collect-profiles-29536905-l2blb\" (UID: \"32293f8e-224e-47f3-95e8-3bd97ad81d4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536905-l2blb" Feb 27 17:45:00 crc kubenswrapper[4751]: I0227 17:45:00.378538 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/32293f8e-224e-47f3-95e8-3bd97ad81d4f-config-volume\") pod 
\"collect-profiles-29536905-l2blb\" (UID: \"32293f8e-224e-47f3-95e8-3bd97ad81d4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536905-l2blb" Feb 27 17:45:00 crc kubenswrapper[4751]: I0227 17:45:00.389811 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/32293f8e-224e-47f3-95e8-3bd97ad81d4f-secret-volume\") pod \"collect-profiles-29536905-l2blb\" (UID: \"32293f8e-224e-47f3-95e8-3bd97ad81d4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536905-l2blb" Feb 27 17:45:00 crc kubenswrapper[4751]: I0227 17:45:00.415315 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gktrc\" (UniqueName: \"kubernetes.io/projected/32293f8e-224e-47f3-95e8-3bd97ad81d4f-kube-api-access-gktrc\") pod \"collect-profiles-29536905-l2blb\" (UID: \"32293f8e-224e-47f3-95e8-3bd97ad81d4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536905-l2blb" Feb 27 17:45:00 crc kubenswrapper[4751]: I0227 17:45:00.506917 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536905-l2blb" Feb 27 17:45:00 crc kubenswrapper[4751]: I0227 17:45:00.794913 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536905-l2blb"] Feb 27 17:45:00 crc kubenswrapper[4751]: I0227 17:45:00.902588 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536905-l2blb" event={"ID":"32293f8e-224e-47f3-95e8-3bd97ad81d4f","Type":"ContainerStarted","Data":"99ee306da5ad2a9fade27c2d1dda2554a6b5d82842ea1fc6f4f09384c6c718bc"} Feb 27 17:45:01 crc kubenswrapper[4751]: I0227 17:45:01.916056 4751 generic.go:334] "Generic (PLEG): container finished" podID="32293f8e-224e-47f3-95e8-3bd97ad81d4f" containerID="0f520845956e6855be3dbef628d33181ed793f4d35526c5840c6a38d4f157214" exitCode=0 Feb 27 17:45:01 crc kubenswrapper[4751]: I0227 17:45:01.916122 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536905-l2blb" event={"ID":"32293f8e-224e-47f3-95e8-3bd97ad81d4f","Type":"ContainerDied","Data":"0f520845956e6855be3dbef628d33181ed793f4d35526c5840c6a38d4f157214"} Feb 27 17:45:03 crc kubenswrapper[4751]: I0227 17:45:03.265100 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536905-l2blb" Feb 27 17:45:03 crc kubenswrapper[4751]: I0227 17:45:03.342579 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gktrc\" (UniqueName: \"kubernetes.io/projected/32293f8e-224e-47f3-95e8-3bd97ad81d4f-kube-api-access-gktrc\") pod \"32293f8e-224e-47f3-95e8-3bd97ad81d4f\" (UID: \"32293f8e-224e-47f3-95e8-3bd97ad81d4f\") " Feb 27 17:45:03 crc kubenswrapper[4751]: I0227 17:45:03.342783 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/32293f8e-224e-47f3-95e8-3bd97ad81d4f-secret-volume\") pod \"32293f8e-224e-47f3-95e8-3bd97ad81d4f\" (UID: \"32293f8e-224e-47f3-95e8-3bd97ad81d4f\") " Feb 27 17:45:03 crc kubenswrapper[4751]: I0227 17:45:03.342837 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/32293f8e-224e-47f3-95e8-3bd97ad81d4f-config-volume\") pod \"32293f8e-224e-47f3-95e8-3bd97ad81d4f\" (UID: \"32293f8e-224e-47f3-95e8-3bd97ad81d4f\") " Feb 27 17:45:03 crc kubenswrapper[4751]: I0227 17:45:03.343927 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/32293f8e-224e-47f3-95e8-3bd97ad81d4f-config-volume" (OuterVolumeSpecName: "config-volume") pod "32293f8e-224e-47f3-95e8-3bd97ad81d4f" (UID: "32293f8e-224e-47f3-95e8-3bd97ad81d4f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 17:45:03 crc kubenswrapper[4751]: I0227 17:45:03.349384 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32293f8e-224e-47f3-95e8-3bd97ad81d4f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "32293f8e-224e-47f3-95e8-3bd97ad81d4f" (UID: "32293f8e-224e-47f3-95e8-3bd97ad81d4f"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 17:45:03 crc kubenswrapper[4751]: I0227 17:45:03.353791 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32293f8e-224e-47f3-95e8-3bd97ad81d4f-kube-api-access-gktrc" (OuterVolumeSpecName: "kube-api-access-gktrc") pod "32293f8e-224e-47f3-95e8-3bd97ad81d4f" (UID: "32293f8e-224e-47f3-95e8-3bd97ad81d4f"). InnerVolumeSpecName "kube-api-access-gktrc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:45:03 crc kubenswrapper[4751]: I0227 17:45:03.445275 4751 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/32293f8e-224e-47f3-95e8-3bd97ad81d4f-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 27 17:45:03 crc kubenswrapper[4751]: I0227 17:45:03.445316 4751 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/32293f8e-224e-47f3-95e8-3bd97ad81d4f-config-volume\") on node \"crc\" DevicePath \"\"" Feb 27 17:45:03 crc kubenswrapper[4751]: I0227 17:45:03.445331 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gktrc\" (UniqueName: \"kubernetes.io/projected/32293f8e-224e-47f3-95e8-3bd97ad81d4f-kube-api-access-gktrc\") on node \"crc\" DevicePath \"\"" Feb 27 17:45:03 crc kubenswrapper[4751]: I0227 17:45:03.934160 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536905-l2blb" event={"ID":"32293f8e-224e-47f3-95e8-3bd97ad81d4f","Type":"ContainerDied","Data":"99ee306da5ad2a9fade27c2d1dda2554a6b5d82842ea1fc6f4f09384c6c718bc"} Feb 27 17:45:03 crc kubenswrapper[4751]: I0227 17:45:03.934541 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="99ee306da5ad2a9fade27c2d1dda2554a6b5d82842ea1fc6f4f09384c6c718bc" Feb 27 17:45:03 crc kubenswrapper[4751]: I0227 17:45:03.934200 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536905-l2blb" Feb 27 17:45:04 crc kubenswrapper[4751]: I0227 17:45:04.368533 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536860-bh2g4"] Feb 27 17:45:04 crc kubenswrapper[4751]: I0227 17:45:04.375903 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536860-bh2g4"] Feb 27 17:45:04 crc kubenswrapper[4751]: I0227 17:45:04.531545 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd785ec2-2ac7-47fb-bc33-07aa127fe1d0" path="/var/lib/kubelet/pods/dd785ec2-2ac7-47fb-bc33-07aa127fe1d0/volumes" Feb 27 17:45:07 crc kubenswrapper[4751]: E0227 17:45:07.524082 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536904-bp2m9" podUID="d1e31907-3659-412d-8af8-69bd2998180e" Feb 27 17:45:08 crc kubenswrapper[4751]: E0227 17:45:08.530807 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:45:09 crc kubenswrapper[4751]: E0227 17:45:09.523097 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:45:11 crc kubenswrapper[4751]: I0227 17:45:11.520220 4751 scope.go:117] "RemoveContainer" 
containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:45:11 crc kubenswrapper[4751]: E0227 17:45:11.521368 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:45:18 crc kubenswrapper[4751]: E0227 17:45:18.531607 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536904-bp2m9" podUID="d1e31907-3659-412d-8af8-69bd2998180e" Feb 27 17:45:20 crc kubenswrapper[4751]: E0227 17:45:20.523631 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:45:21 crc kubenswrapper[4751]: E0227 17:45:21.522530 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:45:24 crc kubenswrapper[4751]: I0227 17:45:24.521682 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:45:24 crc kubenswrapper[4751]: E0227 17:45:24.522250 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:45:34 crc kubenswrapper[4751]: I0227 17:45:34.202759 4751 generic.go:334] "Generic (PLEG): container finished" podID="d1e31907-3659-412d-8af8-69bd2998180e" containerID="65c481300c7a8c3e70a97cf38785d54403cdeb826ffe23600201431ec01cf0e1" exitCode=0 Feb 27 17:45:34 crc kubenswrapper[4751]: I0227 17:45:34.202845 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536904-bp2m9" event={"ID":"d1e31907-3659-412d-8af8-69bd2998180e","Type":"ContainerDied","Data":"65c481300c7a8c3e70a97cf38785d54403cdeb826ffe23600201431ec01cf0e1"} Feb 27 17:45:35 crc kubenswrapper[4751]: E0227 17:45:35.522447 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:45:35 crc kubenswrapper[4751]: I0227 17:45:35.638907 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536904-bp2m9" Feb 27 17:45:35 crc kubenswrapper[4751]: I0227 17:45:35.664227 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2vmj\" (UniqueName: \"kubernetes.io/projected/d1e31907-3659-412d-8af8-69bd2998180e-kube-api-access-n2vmj\") pod \"d1e31907-3659-412d-8af8-69bd2998180e\" (UID: \"d1e31907-3659-412d-8af8-69bd2998180e\") " Feb 27 17:45:35 crc kubenswrapper[4751]: I0227 17:45:35.671338 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1e31907-3659-412d-8af8-69bd2998180e-kube-api-access-n2vmj" (OuterVolumeSpecName: "kube-api-access-n2vmj") pod "d1e31907-3659-412d-8af8-69bd2998180e" (UID: "d1e31907-3659-412d-8af8-69bd2998180e"). InnerVolumeSpecName "kube-api-access-n2vmj". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:45:35 crc kubenswrapper[4751]: I0227 17:45:35.766099 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2vmj\" (UniqueName: \"kubernetes.io/projected/d1e31907-3659-412d-8af8-69bd2998180e-kube-api-access-n2vmj\") on node \"crc\" DevicePath \"\"" Feb 27 17:45:36 crc kubenswrapper[4751]: I0227 17:45:36.224790 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536904-bp2m9" event={"ID":"d1e31907-3659-412d-8af8-69bd2998180e","Type":"ContainerDied","Data":"8742be293148c46875dfca7dcf147010baf628ac9f473e55402cb50a45483dff"} Feb 27 17:45:36 crc kubenswrapper[4751]: I0227 17:45:36.224841 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536904-bp2m9" Feb 27 17:45:36 crc kubenswrapper[4751]: I0227 17:45:36.225012 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8742be293148c46875dfca7dcf147010baf628ac9f473e55402cb50a45483dff" Feb 27 17:45:36 crc kubenswrapper[4751]: E0227 17:45:36.524081 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:45:36 crc kubenswrapper[4751]: I0227 17:45:36.747239 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536896-frp6b"] Feb 27 17:45:36 crc kubenswrapper[4751]: I0227 17:45:36.758893 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536896-frp6b"] Feb 27 17:45:38 crc kubenswrapper[4751]: I0227 17:45:38.538929 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40fa2fd2-6adb-4544-bbd2-8e0465e66a84" path="/var/lib/kubelet/pods/40fa2fd2-6adb-4544-bbd2-8e0465e66a84/volumes" Feb 27 17:45:39 crc kubenswrapper[4751]: I0227 17:45:39.521084 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:45:39 crc kubenswrapper[4751]: E0227 17:45:39.521618 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" 
podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:45:48 crc kubenswrapper[4751]: E0227 17:45:48.531592 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:45:50 crc kubenswrapper[4751]: E0227 17:45:50.523104 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:45:51 crc kubenswrapper[4751]: I0227 17:45:51.995626 4751 scope.go:117] "RemoveContainer" containerID="71bd1940900031bf1ce66d5f9d67f7d46233883d0b3818018ae7387976401f17" Feb 27 17:45:52 crc kubenswrapper[4751]: I0227 17:45:52.029973 4751 scope.go:117] "RemoveContainer" containerID="83003d3384bc71e5269380797723b94dc77b2303b947ba578fd2a8af0a426688" Feb 27 17:45:54 crc kubenswrapper[4751]: I0227 17:45:54.521201 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:45:54 crc kubenswrapper[4751]: E0227 17:45:54.522833 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:46:00 crc kubenswrapper[4751]: I0227 17:46:00.166386 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536906-962gp"] Feb 27 17:46:00 crc kubenswrapper[4751]: E0227 17:46:00.167613 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32293f8e-224e-47f3-95e8-3bd97ad81d4f" containerName="collect-profiles" Feb 27 17:46:00 crc kubenswrapper[4751]: I0227 17:46:00.167638 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="32293f8e-224e-47f3-95e8-3bd97ad81d4f" containerName="collect-profiles" Feb 27 17:46:00 crc kubenswrapper[4751]: E0227 17:46:00.167728 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1e31907-3659-412d-8af8-69bd2998180e" containerName="oc" Feb 27 17:46:00 crc kubenswrapper[4751]: I0227 17:46:00.167742 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1e31907-3659-412d-8af8-69bd2998180e" containerName="oc" Feb 27 17:46:00 crc kubenswrapper[4751]: I0227 17:46:00.168017 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="32293f8e-224e-47f3-95e8-3bd97ad81d4f" containerName="collect-profiles" Feb 27 17:46:00 crc kubenswrapper[4751]: I0227 17:46:00.168043 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1e31907-3659-412d-8af8-69bd2998180e" containerName="oc" Feb 27 17:46:00 crc kubenswrapper[4751]: I0227 17:46:00.168842 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536906-962gp" Feb 27 17:46:00 crc kubenswrapper[4751]: I0227 17:46:00.186673 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wgqlm\" (UniqueName: \"kubernetes.io/projected/0a0d3c7f-3fd8-462c-8ee9-93d7646a154c-kube-api-access-wgqlm\") pod \"auto-csr-approver-29536906-962gp\" (UID: \"0a0d3c7f-3fd8-462c-8ee9-93d7646a154c\") " pod="openshift-infra/auto-csr-approver-29536906-962gp" Feb 27 17:46:00 crc kubenswrapper[4751]: I0227 17:46:00.190885 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536906-962gp"] Feb 27 17:46:00 crc kubenswrapper[4751]: I0227 17:46:00.288677 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wgqlm\" (UniqueName: \"kubernetes.io/projected/0a0d3c7f-3fd8-462c-8ee9-93d7646a154c-kube-api-access-wgqlm\") pod \"auto-csr-approver-29536906-962gp\" (UID: \"0a0d3c7f-3fd8-462c-8ee9-93d7646a154c\") " pod="openshift-infra/auto-csr-approver-29536906-962gp" Feb 27 17:46:00 crc kubenswrapper[4751]: I0227 17:46:00.321224 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wgqlm\" (UniqueName: \"kubernetes.io/projected/0a0d3c7f-3fd8-462c-8ee9-93d7646a154c-kube-api-access-wgqlm\") pod \"auto-csr-approver-29536906-962gp\" (UID: \"0a0d3c7f-3fd8-462c-8ee9-93d7646a154c\") " pod="openshift-infra/auto-csr-approver-29536906-962gp" Feb 27 17:46:00 crc kubenswrapper[4751]: I0227 17:46:00.502630 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536906-962gp" Feb 27 17:46:00 crc kubenswrapper[4751]: I0227 17:46:00.786629 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536906-962gp"] Feb 27 17:46:00 crc kubenswrapper[4751]: W0227 17:46:00.793911 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0a0d3c7f_3fd8_462c_8ee9_93d7646a154c.slice/crio-2e7e5d161e54569103f6ad5554d4f954f21666f2c26f723953d63e55d46fbbcd WatchSource:0}: Error finding container 2e7e5d161e54569103f6ad5554d4f954f21666f2c26f723953d63e55d46fbbcd: Status 404 returned error can't find the container with id 2e7e5d161e54569103f6ad5554d4f954f21666f2c26f723953d63e55d46fbbcd Feb 27 17:46:01 crc kubenswrapper[4751]: I0227 17:46:01.483215 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536906-962gp" event={"ID":"0a0d3c7f-3fd8-462c-8ee9-93d7646a154c","Type":"ContainerStarted","Data":"2e7e5d161e54569103f6ad5554d4f954f21666f2c26f723953d63e55d46fbbcd"} Feb 27 17:46:01 crc kubenswrapper[4751]: E0227 17:46:01.523481 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:46:02 crc kubenswrapper[4751]: E0227 17:46:02.523264 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:46:04 crc kubenswrapper[4751]: 
I0227 17:46:04.513457 4751 generic.go:334] "Generic (PLEG): container finished" podID="0a0d3c7f-3fd8-462c-8ee9-93d7646a154c" containerID="a4bfc45fda689afd626f10704cf897f8fe73389f97b44133213d03a39f5f013c" exitCode=0 Feb 27 17:46:04 crc kubenswrapper[4751]: I0227 17:46:04.513541 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536906-962gp" event={"ID":"0a0d3c7f-3fd8-462c-8ee9-93d7646a154c","Type":"ContainerDied","Data":"a4bfc45fda689afd626f10704cf897f8fe73389f97b44133213d03a39f5f013c"} Feb 27 17:46:05 crc kubenswrapper[4751]: I0227 17:46:05.897706 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536906-962gp" Feb 27 17:46:06 crc kubenswrapper[4751]: I0227 17:46:06.080833 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wgqlm\" (UniqueName: \"kubernetes.io/projected/0a0d3c7f-3fd8-462c-8ee9-93d7646a154c-kube-api-access-wgqlm\") pod \"0a0d3c7f-3fd8-462c-8ee9-93d7646a154c\" (UID: \"0a0d3c7f-3fd8-462c-8ee9-93d7646a154c\") " Feb 27 17:46:06 crc kubenswrapper[4751]: I0227 17:46:06.089044 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a0d3c7f-3fd8-462c-8ee9-93d7646a154c-kube-api-access-wgqlm" (OuterVolumeSpecName: "kube-api-access-wgqlm") pod "0a0d3c7f-3fd8-462c-8ee9-93d7646a154c" (UID: "0a0d3c7f-3fd8-462c-8ee9-93d7646a154c"). InnerVolumeSpecName "kube-api-access-wgqlm". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:46:06 crc kubenswrapper[4751]: I0227 17:46:06.183224 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wgqlm\" (UniqueName: \"kubernetes.io/projected/0a0d3c7f-3fd8-462c-8ee9-93d7646a154c-kube-api-access-wgqlm\") on node \"crc\" DevicePath \"\"" Feb 27 17:46:06 crc kubenswrapper[4751]: I0227 17:46:06.536474 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536906-962gp" event={"ID":"0a0d3c7f-3fd8-462c-8ee9-93d7646a154c","Type":"ContainerDied","Data":"2e7e5d161e54569103f6ad5554d4f954f21666f2c26f723953d63e55d46fbbcd"} Feb 27 17:46:06 crc kubenswrapper[4751]: I0227 17:46:06.536947 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e7e5d161e54569103f6ad5554d4f954f21666f2c26f723953d63e55d46fbbcd" Feb 27 17:46:06 crc kubenswrapper[4751]: I0227 17:46:06.536537 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536906-962gp" Feb 27 17:46:06 crc kubenswrapper[4751]: I0227 17:46:06.993143 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536900-nrb6l"] Feb 27 17:46:06 crc kubenswrapper[4751]: I0227 17:46:06.999318 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536900-nrb6l"] Feb 27 17:46:07 crc kubenswrapper[4751]: I0227 17:46:07.520866 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:46:07 crc kubenswrapper[4751]: E0227 17:46:07.521319 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:46:08 crc kubenswrapper[4751]: I0227 17:46:08.535615 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="372a6de0-a6f7-481f-a15f-8ef33633c626" path="/var/lib/kubelet/pods/372a6de0-a6f7-481f-a15f-8ef33633c626/volumes" Feb 27 17:46:15 crc kubenswrapper[4751]: E0227 17:46:15.524704 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:46:15 crc kubenswrapper[4751]: E0227 17:46:15.524740 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" Feb 27 17:46:20 crc kubenswrapper[4751]: I0227 17:46:20.521020 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:46:20 crc kubenswrapper[4751]: E0227 17:46:20.521739 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:46:27 crc kubenswrapper[4751]: E0227 17:46:27.523195 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:46:30 crc kubenswrapper[4751]: I0227 17:46:30.525643 4751 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 27 17:46:31 crc kubenswrapper[4751]: I0227 17:46:31.772834 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b7swh" 
event={"ID":"6fa2b8c3-b236-45df-aecb-d5abdb1d549f","Type":"ContainerStarted","Data":"685fa028c98d196831d114dd49616cdc2dae76a1ec5a43eba05db19c9ffe4d9c"} Feb 27 17:46:32 crc kubenswrapper[4751]: I0227 17:46:32.275365 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mmdln"] Feb 27 17:46:32 crc kubenswrapper[4751]: E0227 17:46:32.275923 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a0d3c7f-3fd8-462c-8ee9-93d7646a154c" containerName="oc" Feb 27 17:46:32 crc kubenswrapper[4751]: I0227 17:46:32.275955 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a0d3c7f-3fd8-462c-8ee9-93d7646a154c" containerName="oc" Feb 27 17:46:32 crc kubenswrapper[4751]: I0227 17:46:32.276249 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a0d3c7f-3fd8-462c-8ee9-93d7646a154c" containerName="oc" Feb 27 17:46:32 crc kubenswrapper[4751]: I0227 17:46:32.278117 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mmdln" Feb 27 17:46:32 crc kubenswrapper[4751]: I0227 17:46:32.304605 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mmdln"] Feb 27 17:46:32 crc kubenswrapper[4751]: I0227 17:46:32.402712 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kczvp\" (UniqueName: \"kubernetes.io/projected/028097b5-dd4b-4860-bbe4-aa3cb5a79c2c-kube-api-access-kczvp\") pod \"redhat-marketplace-mmdln\" (UID: \"028097b5-dd4b-4860-bbe4-aa3cb5a79c2c\") " pod="openshift-marketplace/redhat-marketplace-mmdln" Feb 27 17:46:32 crc kubenswrapper[4751]: I0227 17:46:32.402810 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/028097b5-dd4b-4860-bbe4-aa3cb5a79c2c-catalog-content\") pod \"redhat-marketplace-mmdln\" (UID: \"028097b5-dd4b-4860-bbe4-aa3cb5a79c2c\") " pod="openshift-marketplace/redhat-marketplace-mmdln" Feb 27 17:46:32 crc kubenswrapper[4751]: I0227 17:46:32.402898 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/028097b5-dd4b-4860-bbe4-aa3cb5a79c2c-utilities\") pod \"redhat-marketplace-mmdln\" (UID: \"028097b5-dd4b-4860-bbe4-aa3cb5a79c2c\") " pod="openshift-marketplace/redhat-marketplace-mmdln" Feb 27 17:46:32 crc kubenswrapper[4751]: I0227 17:46:32.503976 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kczvp\" (UniqueName: \"kubernetes.io/projected/028097b5-dd4b-4860-bbe4-aa3cb5a79c2c-kube-api-access-kczvp\") pod \"redhat-marketplace-mmdln\" (UID: \"028097b5-dd4b-4860-bbe4-aa3cb5a79c2c\") " pod="openshift-marketplace/redhat-marketplace-mmdln" Feb 27 17:46:32 crc kubenswrapper[4751]: I0227 17:46:32.504087 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/028097b5-dd4b-4860-bbe4-aa3cb5a79c2c-catalog-content\") pod \"redhat-marketplace-mmdln\" (UID: \"028097b5-dd4b-4860-bbe4-aa3cb5a79c2c\") " pod="openshift-marketplace/redhat-marketplace-mmdln" Feb 27 17:46:32 crc kubenswrapper[4751]: I0227 17:46:32.504205 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/028097b5-dd4b-4860-bbe4-aa3cb5a79c2c-utilities\") pod 
\"redhat-marketplace-mmdln\" (UID: \"028097b5-dd4b-4860-bbe4-aa3cb5a79c2c\") " pod="openshift-marketplace/redhat-marketplace-mmdln" Feb 27 17:46:32 crc kubenswrapper[4751]: I0227 17:46:32.504785 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/028097b5-dd4b-4860-bbe4-aa3cb5a79c2c-catalog-content\") pod \"redhat-marketplace-mmdln\" (UID: \"028097b5-dd4b-4860-bbe4-aa3cb5a79c2c\") " pod="openshift-marketplace/redhat-marketplace-mmdln" Feb 27 17:46:32 crc kubenswrapper[4751]: I0227 17:46:32.504867 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/028097b5-dd4b-4860-bbe4-aa3cb5a79c2c-utilities\") pod \"redhat-marketplace-mmdln\" (UID: \"028097b5-dd4b-4860-bbe4-aa3cb5a79c2c\") " pod="openshift-marketplace/redhat-marketplace-mmdln" Feb 27 17:46:32 crc kubenswrapper[4751]: I0227 17:46:32.524642 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kczvp\" (UniqueName: \"kubernetes.io/projected/028097b5-dd4b-4860-bbe4-aa3cb5a79c2c-kube-api-access-kczvp\") pod \"redhat-marketplace-mmdln\" (UID: \"028097b5-dd4b-4860-bbe4-aa3cb5a79c2c\") " pod="openshift-marketplace/redhat-marketplace-mmdln" Feb 27 17:46:32 crc kubenswrapper[4751]: I0227 17:46:32.603146 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mmdln" Feb 27 17:46:32 crc kubenswrapper[4751]: I0227 17:46:32.787114 4751 generic.go:334] "Generic (PLEG): container finished" podID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" containerID="685fa028c98d196831d114dd49616cdc2dae76a1ec5a43eba05db19c9ffe4d9c" exitCode=0 Feb 27 17:46:32 crc kubenswrapper[4751]: I0227 17:46:32.787145 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b7swh" event={"ID":"6fa2b8c3-b236-45df-aecb-d5abdb1d549f","Type":"ContainerDied","Data":"685fa028c98d196831d114dd49616cdc2dae76a1ec5a43eba05db19c9ffe4d9c"} Feb 27 17:46:32 crc kubenswrapper[4751]: I0227 17:46:32.852233 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mmdln"] Feb 27 17:46:33 crc kubenswrapper[4751]: I0227 17:46:33.521175 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:46:33 crc kubenswrapper[4751]: I0227 17:46:33.798185 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b7swh" event={"ID":"6fa2b8c3-b236-45df-aecb-d5abdb1d549f","Type":"ContainerStarted","Data":"eda0ab37eb8d2fa7e451064f23bc36a5f930ddc472dc1c3d317bc7f4594ac8b0"} Feb 27 17:46:33 crc kubenswrapper[4751]: I0227 17:46:33.801753 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerStarted","Data":"f6933fc985518da96693a8b2d200d4e49145e296c6cdf1cf2e8ca5bb1a0e9524"} Feb 27 17:46:33 crc kubenswrapper[4751]: I0227 17:46:33.804862 4751 generic.go:334] "Generic (PLEG): container finished" podID="028097b5-dd4b-4860-bbe4-aa3cb5a79c2c" containerID="4fcb4e69b86c9fb78ba2cae4711ab52b0afbccfd42f13434c8b9d1cc41f8ad2c" exitCode=0 Feb 27 17:46:33 crc kubenswrapper[4751]: I0227 17:46:33.804930 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mmdln" 
event={"ID":"028097b5-dd4b-4860-bbe4-aa3cb5a79c2c","Type":"ContainerDied","Data":"4fcb4e69b86c9fb78ba2cae4711ab52b0afbccfd42f13434c8b9d1cc41f8ad2c"} Feb 27 17:46:33 crc kubenswrapper[4751]: I0227 17:46:33.804972 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mmdln" event={"ID":"028097b5-dd4b-4860-bbe4-aa3cb5a79c2c","Type":"ContainerStarted","Data":"7f2d30b0f903bc64472467d838e58af8131a51631880e4565847a8c44d3a5c87"} Feb 27 17:46:33 crc kubenswrapper[4751]: I0227 17:46:33.837396 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-b7swh" podStartSLOduration=2.103451333 podStartE2EDuration="5m52.837373789s" podCreationTimestamp="2026-02-27 17:40:41 +0000 UTC" firstStartedPulling="2026-02-27 17:40:42.488254995 +0000 UTC m=+4604.635269442" lastFinishedPulling="2026-02-27 17:46:33.222177411 +0000 UTC m=+4955.369191898" observedRunningTime="2026-02-27 17:46:33.83280091 +0000 UTC m=+4955.979815397" watchObservedRunningTime="2026-02-27 17:46:33.837373789 +0000 UTC m=+4955.984388276" Feb 27 17:46:34 crc kubenswrapper[4751]: E0227 17:46:34.460283 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/redhat-marketplace-index@sha256=e848a00af7690cfa41500b98e0e7a0b9738ce0af7b6b4fee3ea20e0838523c30/signature-2: status 500 (Internal Server Error)" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Feb 27 17:46:34 crc kubenswrapper[4751]: E0227 17:46:34.460688 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kczvp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-mmdln_openshift-marketplace(028097b5-dd4b-4860-bbe4-aa3cb5a79c2c): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from 
https://registry.redhat.io/containers/sigstore/redhat/redhat-marketplace-index@sha256=e848a00af7690cfa41500b98e0e7a0b9738ce0af7b6b4fee3ea20e0838523c30/signature-2: status 500 (Internal Server Error)" logger="UnhandledError" Feb 27 17:46:34 crc kubenswrapper[4751]: E0227 17:46:34.461965 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/redhat-marketplace-index@sha256=e848a00af7690cfa41500b98e0e7a0b9738ce0af7b6b4fee3ea20e0838523c30/signature-2: status 500 (Internal Server Error)\"" pod="openshift-marketplace/redhat-marketplace-mmdln" podUID="028097b5-dd4b-4860-bbe4-aa3cb5a79c2c" Feb 27 17:46:34 crc kubenswrapper[4751]: E0227 17:46:34.818015 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-mmdln" podUID="028097b5-dd4b-4860-bbe4-aa3cb5a79c2c" Feb 27 17:46:40 crc kubenswrapper[4751]: E0227 17:46:40.523819 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:46:41 crc kubenswrapper[4751]: I0227 17:46:41.527542 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-b7swh" Feb 27 17:46:41 crc kubenswrapper[4751]: I0227 17:46:41.527598 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-b7swh" Feb 27 17:46:41 crc kubenswrapper[4751]: I0227 17:46:41.577586 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-b7swh" Feb 27 17:46:41 crc kubenswrapper[4751]: I0227 17:46:41.917192 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-b7swh" Feb 27 17:46:42 crc kubenswrapper[4751]: I0227 17:46:42.664464 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-b7swh"] Feb 27 17:46:43 crc kubenswrapper[4751]: I0227 17:46:43.896130 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-b7swh" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" containerName="registry-server" containerID="cri-o://eda0ab37eb8d2fa7e451064f23bc36a5f930ddc472dc1c3d317bc7f4594ac8b0" gracePeriod=2 Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.463584 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-b7swh" Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.589533 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fa2b8c3-b236-45df-aecb-d5abdb1d549f-utilities\") pod \"6fa2b8c3-b236-45df-aecb-d5abdb1d549f\" (UID: \"6fa2b8c3-b236-45df-aecb-d5abdb1d549f\") " Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.589657 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fa2b8c3-b236-45df-aecb-d5abdb1d549f-catalog-content\") pod \"6fa2b8c3-b236-45df-aecb-d5abdb1d549f\" (UID: \"6fa2b8c3-b236-45df-aecb-d5abdb1d549f\") " Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.589686 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bdt42\" (UniqueName: \"kubernetes.io/projected/6fa2b8c3-b236-45df-aecb-d5abdb1d549f-kube-api-access-bdt42\") pod \"6fa2b8c3-b236-45df-aecb-d5abdb1d549f\" (UID: \"6fa2b8c3-b236-45df-aecb-d5abdb1d549f\") " Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.590844 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fa2b8c3-b236-45df-aecb-d5abdb1d549f-utilities" (OuterVolumeSpecName: "utilities") pod "6fa2b8c3-b236-45df-aecb-d5abdb1d549f" (UID: "6fa2b8c3-b236-45df-aecb-d5abdb1d549f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.597068 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fa2b8c3-b236-45df-aecb-d5abdb1d549f-kube-api-access-bdt42" (OuterVolumeSpecName: "kube-api-access-bdt42") pod "6fa2b8c3-b236-45df-aecb-d5abdb1d549f" (UID: "6fa2b8c3-b236-45df-aecb-d5abdb1d549f"). InnerVolumeSpecName "kube-api-access-bdt42". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.652446 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fa2b8c3-b236-45df-aecb-d5abdb1d549f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6fa2b8c3-b236-45df-aecb-d5abdb1d549f" (UID: "6fa2b8c3-b236-45df-aecb-d5abdb1d549f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.692230 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fa2b8c3-b236-45df-aecb-d5abdb1d549f-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.692286 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fa2b8c3-b236-45df-aecb-d5abdb1d549f-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.692308 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bdt42\" (UniqueName: \"kubernetes.io/projected/6fa2b8c3-b236-45df-aecb-d5abdb1d549f-kube-api-access-bdt42\") on node \"crc\" DevicePath \"\"" Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.904323 4751 generic.go:334] "Generic (PLEG): container finished" podID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" containerID="eda0ab37eb8d2fa7e451064f23bc36a5f930ddc472dc1c3d317bc7f4594ac8b0" exitCode=0 Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.904375 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b7swh" event={"ID":"6fa2b8c3-b236-45df-aecb-d5abdb1d549f","Type":"ContainerDied","Data":"eda0ab37eb8d2fa7e451064f23bc36a5f930ddc472dc1c3d317bc7f4594ac8b0"} Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.904426 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b7swh" event={"ID":"6fa2b8c3-b236-45df-aecb-d5abdb1d549f","Type":"ContainerDied","Data":"5185277d3a7e00c60ae7c952e414cb7340703c69cc1094248a7fc94bec8be32b"} Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.904426 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-b7swh" Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.904448 4751 scope.go:117] "RemoveContainer" containerID="eda0ab37eb8d2fa7e451064f23bc36a5f930ddc472dc1c3d317bc7f4594ac8b0" Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.926244 4751 scope.go:117] "RemoveContainer" containerID="685fa028c98d196831d114dd49616cdc2dae76a1ec5a43eba05db19c9ffe4d9c" Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.954508 4751 scope.go:117] "RemoveContainer" containerID="d48c30d0e39febbcb76690cbc833ad0597dddc23fb1c6928c76ea3f7d912f6bd" Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.964067 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-b7swh"] Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.973150 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-b7swh"] Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.979027 4751 scope.go:117] "RemoveContainer" containerID="eda0ab37eb8d2fa7e451064f23bc36a5f930ddc472dc1c3d317bc7f4594ac8b0" Feb 27 17:46:44 crc kubenswrapper[4751]: E0227 17:46:44.979463 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eda0ab37eb8d2fa7e451064f23bc36a5f930ddc472dc1c3d317bc7f4594ac8b0\": container with ID starting with eda0ab37eb8d2fa7e451064f23bc36a5f930ddc472dc1c3d317bc7f4594ac8b0 not found: ID does not exist" containerID="eda0ab37eb8d2fa7e451064f23bc36a5f930ddc472dc1c3d317bc7f4594ac8b0" Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.979511 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eda0ab37eb8d2fa7e451064f23bc36a5f930ddc472dc1c3d317bc7f4594ac8b0"} err="failed to get container status \"eda0ab37eb8d2fa7e451064f23bc36a5f930ddc472dc1c3d317bc7f4594ac8b0\": rpc error: code = NotFound desc = could not find container \"eda0ab37eb8d2fa7e451064f23bc36a5f930ddc472dc1c3d317bc7f4594ac8b0\": container with ID starting with eda0ab37eb8d2fa7e451064f23bc36a5f930ddc472dc1c3d317bc7f4594ac8b0 not found: ID does not exist" Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.979541 4751 scope.go:117] "RemoveContainer" containerID="685fa028c98d196831d114dd49616cdc2dae76a1ec5a43eba05db19c9ffe4d9c" Feb 27 17:46:44 crc kubenswrapper[4751]: E0227 17:46:44.979887 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"685fa028c98d196831d114dd49616cdc2dae76a1ec5a43eba05db19c9ffe4d9c\": container with ID starting with 685fa028c98d196831d114dd49616cdc2dae76a1ec5a43eba05db19c9ffe4d9c not found: ID does not exist" containerID="685fa028c98d196831d114dd49616cdc2dae76a1ec5a43eba05db19c9ffe4d9c" Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.979919 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"685fa028c98d196831d114dd49616cdc2dae76a1ec5a43eba05db19c9ffe4d9c"} err="failed to get container status \"685fa028c98d196831d114dd49616cdc2dae76a1ec5a43eba05db19c9ffe4d9c\": rpc error: code = NotFound desc = could not find container \"685fa028c98d196831d114dd49616cdc2dae76a1ec5a43eba05db19c9ffe4d9c\": container with ID starting with 685fa028c98d196831d114dd49616cdc2dae76a1ec5a43eba05db19c9ffe4d9c not found: ID does not exist" Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.979940 4751 scope.go:117] "RemoveContainer" 
containerID="d48c30d0e39febbcb76690cbc833ad0597dddc23fb1c6928c76ea3f7d912f6bd" Feb 27 17:46:44 crc kubenswrapper[4751]: E0227 17:46:44.980191 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d48c30d0e39febbcb76690cbc833ad0597dddc23fb1c6928c76ea3f7d912f6bd\": container with ID starting with d48c30d0e39febbcb76690cbc833ad0597dddc23fb1c6928c76ea3f7d912f6bd not found: ID does not exist" containerID="d48c30d0e39febbcb76690cbc833ad0597dddc23fb1c6928c76ea3f7d912f6bd" Feb 27 17:46:44 crc kubenswrapper[4751]: I0227 17:46:44.980224 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d48c30d0e39febbcb76690cbc833ad0597dddc23fb1c6928c76ea3f7d912f6bd"} err="failed to get container status \"d48c30d0e39febbcb76690cbc833ad0597dddc23fb1c6928c76ea3f7d912f6bd\": rpc error: code = NotFound desc = could not find container \"d48c30d0e39febbcb76690cbc833ad0597dddc23fb1c6928c76ea3f7d912f6bd\": container with ID starting with d48c30d0e39febbcb76690cbc833ad0597dddc23fb1c6928c76ea3f7d912f6bd not found: ID does not exist" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.180106 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-f8whw"] Feb 27 17:46:46 crc kubenswrapper[4751]: E0227 17:46:46.180735 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" containerName="extract-content" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.180751 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" containerName="extract-content" Feb 27 17:46:46 crc kubenswrapper[4751]: E0227 17:46:46.180772 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" containerName="registry-server" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.180779 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" containerName="registry-server" Feb 27 17:46:46 crc kubenswrapper[4751]: E0227 17:46:46.180788 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" containerName="extract-utilities" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.180794 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" containerName="extract-utilities" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.180957 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" containerName="registry-server" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.181647 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-f8whw" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.190915 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.193319 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.193376 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-np2qq" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.193318 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.196965 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.209091 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-f8whw"] Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.315193 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7wcl\" (UniqueName: \"kubernetes.io/projected/bc279045-0353-4e5c-9be9-5f98561874e5-kube-api-access-c7wcl\") pod \"dnsmasq-dns-5d7b5456f5-f8whw\" (UID: \"bc279045-0353-4e5c-9be9-5f98561874e5\") " pod="openstack/dnsmasq-dns-5d7b5456f5-f8whw" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.315263 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc279045-0353-4e5c-9be9-5f98561874e5-dns-svc\") pod \"dnsmasq-dns-5d7b5456f5-f8whw\" (UID: \"bc279045-0353-4e5c-9be9-5f98561874e5\") " pod="openstack/dnsmasq-dns-5d7b5456f5-f8whw" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.315329 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc279045-0353-4e5c-9be9-5f98561874e5-config\") pod \"dnsmasq-dns-5d7b5456f5-f8whw\" (UID: \"bc279045-0353-4e5c-9be9-5f98561874e5\") " pod="openstack/dnsmasq-dns-5d7b5456f5-f8whw" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.396651 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-679g7"] Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.401619 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-679g7" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.409472 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-679g7"] Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.416504 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7wcl\" (UniqueName: \"kubernetes.io/projected/bc279045-0353-4e5c-9be9-5f98561874e5-kube-api-access-c7wcl\") pod \"dnsmasq-dns-5d7b5456f5-f8whw\" (UID: \"bc279045-0353-4e5c-9be9-5f98561874e5\") " pod="openstack/dnsmasq-dns-5d7b5456f5-f8whw" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.416585 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc279045-0353-4e5c-9be9-5f98561874e5-dns-svc\") pod \"dnsmasq-dns-5d7b5456f5-f8whw\" (UID: \"bc279045-0353-4e5c-9be9-5f98561874e5\") " pod="openstack/dnsmasq-dns-5d7b5456f5-f8whw" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.416638 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc279045-0353-4e5c-9be9-5f98561874e5-config\") pod \"dnsmasq-dns-5d7b5456f5-f8whw\" (UID: \"bc279045-0353-4e5c-9be9-5f98561874e5\") " pod="openstack/dnsmasq-dns-5d7b5456f5-f8whw" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.417491 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc279045-0353-4e5c-9be9-5f98561874e5-config\") pod \"dnsmasq-dns-5d7b5456f5-f8whw\" (UID: \"bc279045-0353-4e5c-9be9-5f98561874e5\") " pod="openstack/dnsmasq-dns-5d7b5456f5-f8whw" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.417683 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc279045-0353-4e5c-9be9-5f98561874e5-dns-svc\") pod \"dnsmasq-dns-5d7b5456f5-f8whw\" (UID: \"bc279045-0353-4e5c-9be9-5f98561874e5\") " pod="openstack/dnsmasq-dns-5d7b5456f5-f8whw" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.446977 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7wcl\" (UniqueName: \"kubernetes.io/projected/bc279045-0353-4e5c-9be9-5f98561874e5-kube-api-access-c7wcl\") pod \"dnsmasq-dns-5d7b5456f5-f8whw\" (UID: \"bc279045-0353-4e5c-9be9-5f98561874e5\") " pod="openstack/dnsmasq-dns-5d7b5456f5-f8whw" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.495964 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-f8whw" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.517463 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f97612c-7a8b-4e2c-93c0-02889ef80459-config\") pod \"dnsmasq-dns-98ddfc8f-679g7\" (UID: \"3f97612c-7a8b-4e2c-93c0-02889ef80459\") " pod="openstack/dnsmasq-dns-98ddfc8f-679g7" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.517515 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vdkwm\" (UniqueName: \"kubernetes.io/projected/3f97612c-7a8b-4e2c-93c0-02889ef80459-kube-api-access-vdkwm\") pod \"dnsmasq-dns-98ddfc8f-679g7\" (UID: \"3f97612c-7a8b-4e2c-93c0-02889ef80459\") " pod="openstack/dnsmasq-dns-98ddfc8f-679g7" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.517592 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3f97612c-7a8b-4e2c-93c0-02889ef80459-dns-svc\") pod \"dnsmasq-dns-98ddfc8f-679g7\" (UID: \"3f97612c-7a8b-4e2c-93c0-02889ef80459\") " pod="openstack/dnsmasq-dns-98ddfc8f-679g7" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.534287 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6fa2b8c3-b236-45df-aecb-d5abdb1d549f" path="/var/lib/kubelet/pods/6fa2b8c3-b236-45df-aecb-d5abdb1d549f/volumes" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.618657 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f97612c-7a8b-4e2c-93c0-02889ef80459-config\") pod \"dnsmasq-dns-98ddfc8f-679g7\" (UID: \"3f97612c-7a8b-4e2c-93c0-02889ef80459\") " pod="openstack/dnsmasq-dns-98ddfc8f-679g7" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.618705 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vdkwm\" (UniqueName: \"kubernetes.io/projected/3f97612c-7a8b-4e2c-93c0-02889ef80459-kube-api-access-vdkwm\") pod \"dnsmasq-dns-98ddfc8f-679g7\" (UID: \"3f97612c-7a8b-4e2c-93c0-02889ef80459\") " pod="openstack/dnsmasq-dns-98ddfc8f-679g7" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.618784 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3f97612c-7a8b-4e2c-93c0-02889ef80459-dns-svc\") pod \"dnsmasq-dns-98ddfc8f-679g7\" (UID: \"3f97612c-7a8b-4e2c-93c0-02889ef80459\") " pod="openstack/dnsmasq-dns-98ddfc8f-679g7" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.619712 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f97612c-7a8b-4e2c-93c0-02889ef80459-config\") pod \"dnsmasq-dns-98ddfc8f-679g7\" (UID: \"3f97612c-7a8b-4e2c-93c0-02889ef80459\") " pod="openstack/dnsmasq-dns-98ddfc8f-679g7" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.619764 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3f97612c-7a8b-4e2c-93c0-02889ef80459-dns-svc\") pod \"dnsmasq-dns-98ddfc8f-679g7\" (UID: \"3f97612c-7a8b-4e2c-93c0-02889ef80459\") " pod="openstack/dnsmasq-dns-98ddfc8f-679g7" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.658075 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vdkwm\" (UniqueName: 
\"kubernetes.io/projected/3f97612c-7a8b-4e2c-93c0-02889ef80459-kube-api-access-vdkwm\") pod \"dnsmasq-dns-98ddfc8f-679g7\" (UID: \"3f97612c-7a8b-4e2c-93c0-02889ef80459\") " pod="openstack/dnsmasq-dns-98ddfc8f-679g7" Feb 27 17:46:46 crc kubenswrapper[4751]: I0227 17:46:46.727042 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-679g7" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.032424 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-f8whw"] Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.143552 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-679g7"] Feb 27 17:46:47 crc kubenswrapper[4751]: W0227 17:46:47.145574 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3f97612c_7a8b_4e2c_93c0_02889ef80459.slice/crio-85a68c3c120bc14fc2572a2a10b61c7bcb383238e95d5d7426ecdb98a09c667c WatchSource:0}: Error finding container 85a68c3c120bc14fc2572a2a10b61c7bcb383238e95d5d7426ecdb98a09c667c: Status 404 returned error can't find the container with id 85a68c3c120bc14fc2572a2a10b61c7bcb383238e95d5d7426ecdb98a09c667c Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.292077 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.294171 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.298986 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.299073 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.299091 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-lfxm5" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.299120 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.299971 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.307049 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.429647 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92mxv\" (UniqueName: \"kubernetes.io/projected/b5737b22-9f6b-4358-8078-ba61f4ecaa55-kube-api-access-92mxv\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.429694 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b5737b22-9f6b-4358-8078-ba61f4ecaa55-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.429718 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b5737b22-9f6b-4358-8078-ba61f4ecaa55-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.429736 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-39b18a24-169e-435c-baa3-bfd95e5cf125\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39b18a24-169e-435c-baa3-bfd95e5cf125\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.429755 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b5737b22-9f6b-4358-8078-ba61f4ecaa55-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.429782 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b5737b22-9f6b-4358-8078-ba61f4ecaa55-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.429806 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b5737b22-9f6b-4358-8078-ba61f4ecaa55-server-conf\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.429827 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b5737b22-9f6b-4358-8078-ba61f4ecaa55-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.429971 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b5737b22-9f6b-4358-8078-ba61f4ecaa55-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.530794 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92mxv\" (UniqueName: \"kubernetes.io/projected/b5737b22-9f6b-4358-8078-ba61f4ecaa55-kube-api-access-92mxv\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.531044 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b5737b22-9f6b-4358-8078-ba61f4ecaa55-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.531139 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/b5737b22-9f6b-4358-8078-ba61f4ecaa55-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.531212 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-39b18a24-169e-435c-baa3-bfd95e5cf125\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39b18a24-169e-435c-baa3-bfd95e5cf125\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.531299 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b5737b22-9f6b-4358-8078-ba61f4ecaa55-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.531400 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b5737b22-9f6b-4358-8078-ba61f4ecaa55-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.531527 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b5737b22-9f6b-4358-8078-ba61f4ecaa55-server-conf\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.531724 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b5737b22-9f6b-4358-8078-ba61f4ecaa55-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.531983 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b5737b22-9f6b-4358-8078-ba61f4ecaa55-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.532419 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b5737b22-9f6b-4358-8078-ba61f4ecaa55-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.532500 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b5737b22-9f6b-4358-8078-ba61f4ecaa55-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.532684 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b5737b22-9f6b-4358-8078-ba61f4ecaa55-server-conf\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: 
I0227 17:46:47.533096 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b5737b22-9f6b-4358-8078-ba61f4ecaa55-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.533735 4751 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.533767 4751 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-39b18a24-169e-435c-baa3-bfd95e5cf125\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39b18a24-169e-435c-baa3-bfd95e5cf125\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/17ae5b1c4afad9afea74dcc35dc03ee0cd4dd4b37cf32e8d6a15e95fb4e7ab1f/globalmount\"" pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.541957 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b5737b22-9f6b-4358-8078-ba61f4ecaa55-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.542046 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b5737b22-9f6b-4358-8078-ba61f4ecaa55-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.546178 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b5737b22-9f6b-4358-8078-ba61f4ecaa55-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.551126 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92mxv\" (UniqueName: \"kubernetes.io/projected/b5737b22-9f6b-4358-8078-ba61f4ecaa55-kube-api-access-92mxv\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.565835 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-39b18a24-169e-435c-baa3-bfd95e5cf125\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39b18a24-169e-435c-baa3-bfd95e5cf125\") pod \"rabbitmq-server-0\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.577426 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.578566 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.580962 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.580973 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-b5zj2" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.581067 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.581078 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.580982 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.592520 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.654855 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.744988 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/09b01bad-88e2-4cfc-874d-28c567cba1ea-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.745095 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/09b01bad-88e2-4cfc-874d-28c567cba1ea-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.745153 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4v8v8\" (UniqueName: \"kubernetes.io/projected/09b01bad-88e2-4cfc-874d-28c567cba1ea-kube-api-access-4v8v8\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.745181 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/09b01bad-88e2-4cfc-874d-28c567cba1ea-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.745247 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/09b01bad-88e2-4cfc-874d-28c567cba1ea-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.745302 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/09b01bad-88e2-4cfc-874d-28c567cba1ea-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.745367 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/09b01bad-88e2-4cfc-874d-28c567cba1ea-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.745393 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/09b01bad-88e2-4cfc-874d-28c567cba1ea-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.745529 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: E0227 17:46:47.748039 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/redhat-marketplace-index@sha256=e848a00af7690cfa41500b98e0e7a0b9738ce0af7b6b4fee3ea20e0838523c30/signature-2: status 500 (Internal Server Error)" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Feb 27 17:46:47 crc kubenswrapper[4751]: E0227 17:46:47.748311 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kczvp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-mmdln_openshift-marketplace(028097b5-dd4b-4860-bbe4-aa3cb5a79c2c): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/redhat-marketplace-index@sha256=e848a00af7690cfa41500b98e0e7a0b9738ce0af7b6b4fee3ea20e0838523c30/signature-2: status 500 (Internal Server Error)" logger="UnhandledError" Feb 27 17:46:47 crc kubenswrapper[4751]: E0227 17:46:47.749855 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/redhat-marketplace-index@sha256=e848a00af7690cfa41500b98e0e7a0b9738ce0af7b6b4fee3ea20e0838523c30/signature-2: status 500 (Internal Server Error)\"" pod="openshift-marketplace/redhat-marketplace-mmdln" podUID="028097b5-dd4b-4860-bbe4-aa3cb5a79c2c" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.846720 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/09b01bad-88e2-4cfc-874d-28c567cba1ea-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.846766 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/09b01bad-88e2-4cfc-874d-28c567cba1ea-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.846797 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/09b01bad-88e2-4cfc-874d-28c567cba1ea-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.846815 4751 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/09b01bad-88e2-4cfc-874d-28c567cba1ea-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.846843 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.846883 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/09b01bad-88e2-4cfc-874d-28c567cba1ea-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.846923 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/09b01bad-88e2-4cfc-874d-28c567cba1ea-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.846947 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4v8v8\" (UniqueName: \"kubernetes.io/projected/09b01bad-88e2-4cfc-874d-28c567cba1ea-kube-api-access-4v8v8\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.846970 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/09b01bad-88e2-4cfc-874d-28c567cba1ea-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.847617 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/09b01bad-88e2-4cfc-874d-28c567cba1ea-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.847938 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/09b01bad-88e2-4cfc-874d-28c567cba1ea-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.848082 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/09b01bad-88e2-4cfc-874d-28c567cba1ea-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.848332 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/09b01bad-88e2-4cfc-874d-28c567cba1ea-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.849937 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/09b01bad-88e2-4cfc-874d-28c567cba1ea-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.852800 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/09b01bad-88e2-4cfc-874d-28c567cba1ea-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.853457 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/09b01bad-88e2-4cfc-874d-28c567cba1ea-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.857729 4751 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.857760 4751 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/fa695d6d534e31bdc767ef4b2993b9c7e55a3b9645abe15cec55d09e67c1246e/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.862619 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4v8v8\" (UniqueName: \"kubernetes.io/projected/09b01bad-88e2-4cfc-874d-28c567cba1ea-kube-api-access-4v8v8\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.895074 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95\") pod \"rabbitmq-cell1-server-0\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.918003 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 27 17:46:47 crc kubenswrapper[4751]: W0227 17:46:47.918981 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb5737b22_9f6b_4358_8078_ba61f4ecaa55.slice/crio-0defa6a50b142148f1c9513c200453509a978c473e743bccf691baf82660f744 WatchSource:0}: Error finding container 0defa6a50b142148f1c9513c200453509a978c473e743bccf691baf82660f744: Status 404 returned error can't find the container with id 
0defa6a50b142148f1c9513c200453509a978c473e743bccf691baf82660f744 Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.924706 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.932219 4751 generic.go:334] "Generic (PLEG): container finished" podID="3f97612c-7a8b-4e2c-93c0-02889ef80459" containerID="2dd735aa8fcbb1ca62075d58e199deedc900ac113d47f20ef967a641966b98ab" exitCode=0 Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.932285 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-679g7" event={"ID":"3f97612c-7a8b-4e2c-93c0-02889ef80459","Type":"ContainerDied","Data":"2dd735aa8fcbb1ca62075d58e199deedc900ac113d47f20ef967a641966b98ab"} Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.932310 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-679g7" event={"ID":"3f97612c-7a8b-4e2c-93c0-02889ef80459","Type":"ContainerStarted","Data":"85a68c3c120bc14fc2572a2a10b61c7bcb383238e95d5d7426ecdb98a09c667c"} Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.933347 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b5737b22-9f6b-4358-8078-ba61f4ecaa55","Type":"ContainerStarted","Data":"0defa6a50b142148f1c9513c200453509a978c473e743bccf691baf82660f744"} Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.934744 4751 generic.go:334] "Generic (PLEG): container finished" podID="bc279045-0353-4e5c-9be9-5f98561874e5" containerID="aaa50bc9911d942fdb51763ef1266d7e2989354fed03ddeb3a278b601e40a6d5" exitCode=0 Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.934787 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-f8whw" event={"ID":"bc279045-0353-4e5c-9be9-5f98561874e5","Type":"ContainerDied","Data":"aaa50bc9911d942fdb51763ef1266d7e2989354fed03ddeb3a278b601e40a6d5"} Feb 27 17:46:47 crc kubenswrapper[4751]: I0227 17:46:47.934815 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-f8whw" event={"ID":"bc279045-0353-4e5c-9be9-5f98561874e5","Type":"ContainerStarted","Data":"7cc84720a26044a0b84144d5f67ae8f69f143d6e6f0110ef79cfa9e3e01332a4"} Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.397831 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 27 17:46:48 crc kubenswrapper[4751]: W0227 17:46:48.403991 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod09b01bad_88e2_4cfc_874d_28c567cba1ea.slice/crio-b7ec5995e40e1b3ad228afba459965412f6fbea3c1398891b0dfc482933e6386 WatchSource:0}: Error finding container b7ec5995e40e1b3ad228afba459965412f6fbea3c1398891b0dfc482933e6386: Status 404 returned error can't find the container with id b7ec5995e40e1b3ad228afba459965412f6fbea3c1398891b0dfc482933e6386 Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.744326 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.745921 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.749229 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.749676 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.749859 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.750182 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-5ks62" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.755589 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.758126 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.877156 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a4b1b5e-d595-4ff3-a99a-e13198440ef3-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.877536 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2a4b1b5e-d595-4ff3-a99a-e13198440ef3-config-data-generated\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.877619 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2a4b1b5e-d595-4ff3-a99a-e13198440ef3-kolla-config\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.877704 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a4b1b5e-d595-4ff3-a99a-e13198440ef3-operator-scripts\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.877777 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-c06ebee7-0be9-47c1-829d-f905da8f6f6e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c06ebee7-0be9-47c1-829d-f905da8f6f6e\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.877876 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2a4b1b5e-d595-4ff3-a99a-e13198440ef3-config-data-default\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.877909 4751 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g692h\" (UniqueName: \"kubernetes.io/projected/2a4b1b5e-d595-4ff3-a99a-e13198440ef3-kube-api-access-g692h\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.877945 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a4b1b5e-d595-4ff3-a99a-e13198440ef3-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.953476 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"09b01bad-88e2-4cfc-874d-28c567cba1ea","Type":"ContainerStarted","Data":"b7ec5995e40e1b3ad228afba459965412f6fbea3c1398891b0dfc482933e6386"} Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.957972 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-f8whw" event={"ID":"bc279045-0353-4e5c-9be9-5f98561874e5","Type":"ContainerStarted","Data":"6bda88ed98fdc446f4dd2b34df080121c254c3f1a9f8afb93a37dd01f3418968"} Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.958473 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5d7b5456f5-f8whw" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.966080 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-679g7" event={"ID":"3f97612c-7a8b-4e2c-93c0-02889ef80459","Type":"ContainerStarted","Data":"3d9281e9d4590e4c262ce766931e2e7851dbc3b506e81b43a42879714c1437ae"} Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.966362 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-98ddfc8f-679g7" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.979016 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a4b1b5e-d595-4ff3-a99a-e13198440ef3-operator-scripts\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.979068 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-c06ebee7-0be9-47c1-829d-f905da8f6f6e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c06ebee7-0be9-47c1-829d-f905da8f6f6e\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.979131 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2a4b1b5e-d595-4ff3-a99a-e13198440ef3-config-data-default\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.979151 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g692h\" (UniqueName: \"kubernetes.io/projected/2a4b1b5e-d595-4ff3-a99a-e13198440ef3-kube-api-access-g692h\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:48 
crc kubenswrapper[4751]: I0227 17:46:48.979168 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a4b1b5e-d595-4ff3-a99a-e13198440ef3-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.979189 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a4b1b5e-d595-4ff3-a99a-e13198440ef3-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.979217 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2a4b1b5e-d595-4ff3-a99a-e13198440ef3-config-data-generated\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.979254 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2a4b1b5e-d595-4ff3-a99a-e13198440ef3-kolla-config\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.979887 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2a4b1b5e-d595-4ff3-a99a-e13198440ef3-kolla-config\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.980825 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a4b1b5e-d595-4ff3-a99a-e13198440ef3-operator-scripts\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.981534 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2a4b1b5e-d595-4ff3-a99a-e13198440ef3-config-data-generated\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.981938 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2a4b1b5e-d595-4ff3-a99a-e13198440ef3-config-data-default\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.983722 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a4b1b5e-d595-4ff3-a99a-e13198440ef3-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.984305 4751 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.984336 4751 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-c06ebee7-0be9-47c1-829d-f905da8f6f6e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c06ebee7-0be9-47c1-829d-f905da8f6f6e\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/6abbfd6e967d6b42c0598a6b0c6afc38cf8cba215dc11b1ed1f45af1d77ea593/globalmount\"" pod="openstack/openstack-galera-0" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.984959 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a4b1b5e-d595-4ff3-a99a-e13198440ef3-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:48 crc kubenswrapper[4751]: I0227 17:46:48.985794 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5d7b5456f5-f8whw" podStartSLOduration=2.985781897 podStartE2EDuration="2.985781897s" podCreationTimestamp="2026-02-27 17:46:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 17:46:48.980324705 +0000 UTC m=+4971.127339182" watchObservedRunningTime="2026-02-27 17:46:48.985781897 +0000 UTC m=+4971.132796354" Feb 27 17:46:49 crc kubenswrapper[4751]: I0227 17:46:49.004086 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g692h\" (UniqueName: \"kubernetes.io/projected/2a4b1b5e-d595-4ff3-a99a-e13198440ef3-kube-api-access-g692h\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:49 crc kubenswrapper[4751]: I0227 17:46:49.187235 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-98ddfc8f-679g7" podStartSLOduration=3.187218912 podStartE2EDuration="3.187218912s" podCreationTimestamp="2026-02-27 17:46:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 17:46:49.02299685 +0000 UTC m=+4971.170011297" watchObservedRunningTime="2026-02-27 17:46:49.187218912 +0000 UTC m=+4971.334233359" Feb 27 17:46:49 crc kubenswrapper[4751]: I0227 17:46:49.187858 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Feb 27 17:46:49 crc kubenswrapper[4751]: I0227 17:46:49.188752 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Feb 27 17:46:49 crc kubenswrapper[4751]: I0227 17:46:49.199584 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Feb 27 17:46:49 crc kubenswrapper[4751]: I0227 17:46:49.200109 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-rswc9" Feb 27 17:46:49 crc kubenswrapper[4751]: I0227 17:46:49.215366 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Feb 27 17:46:49 crc kubenswrapper[4751]: I0227 17:46:49.284244 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8d5mg\" (UniqueName: \"kubernetes.io/projected/f830ff03-d833-4cb2-96fa-19216f5df45a-kube-api-access-8d5mg\") pod \"memcached-0\" (UID: \"f830ff03-d833-4cb2-96fa-19216f5df45a\") " pod="openstack/memcached-0" Feb 27 17:46:49 crc kubenswrapper[4751]: I0227 17:46:49.284311 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f830ff03-d833-4cb2-96fa-19216f5df45a-config-data\") pod \"memcached-0\" (UID: \"f830ff03-d833-4cb2-96fa-19216f5df45a\") " pod="openstack/memcached-0" Feb 27 17:46:49 crc kubenswrapper[4751]: I0227 17:46:49.284378 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f830ff03-d833-4cb2-96fa-19216f5df45a-kolla-config\") pod \"memcached-0\" (UID: \"f830ff03-d833-4cb2-96fa-19216f5df45a\") " pod="openstack/memcached-0" Feb 27 17:46:49 crc kubenswrapper[4751]: I0227 17:46:49.385662 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f830ff03-d833-4cb2-96fa-19216f5df45a-config-data\") pod \"memcached-0\" (UID: \"f830ff03-d833-4cb2-96fa-19216f5df45a\") " pod="openstack/memcached-0" Feb 27 17:46:49 crc kubenswrapper[4751]: I0227 17:46:49.385705 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8d5mg\" (UniqueName: \"kubernetes.io/projected/f830ff03-d833-4cb2-96fa-19216f5df45a-kube-api-access-8d5mg\") pod \"memcached-0\" (UID: \"f830ff03-d833-4cb2-96fa-19216f5df45a\") " pod="openstack/memcached-0" Feb 27 17:46:49 crc kubenswrapper[4751]: I0227 17:46:49.385746 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f830ff03-d833-4cb2-96fa-19216f5df45a-kolla-config\") pod \"memcached-0\" (UID: \"f830ff03-d833-4cb2-96fa-19216f5df45a\") " pod="openstack/memcached-0" Feb 27 17:46:49 crc kubenswrapper[4751]: I0227 17:46:49.386900 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f830ff03-d833-4cb2-96fa-19216f5df45a-kolla-config\") pod \"memcached-0\" (UID: \"f830ff03-d833-4cb2-96fa-19216f5df45a\") " pod="openstack/memcached-0" Feb 27 17:46:49 crc kubenswrapper[4751]: I0227 17:46:49.386919 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f830ff03-d833-4cb2-96fa-19216f5df45a-config-data\") pod \"memcached-0\" (UID: \"f830ff03-d833-4cb2-96fa-19216f5df45a\") " pod="openstack/memcached-0" Feb 27 17:46:49 crc kubenswrapper[4751]: I0227 17:46:49.505869 4751 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"kube-api-access-8d5mg\" (UniqueName: \"kubernetes.io/projected/f830ff03-d833-4cb2-96fa-19216f5df45a-kube-api-access-8d5mg\") pod \"memcached-0\" (UID: \"f830ff03-d833-4cb2-96fa-19216f5df45a\") " pod="openstack/memcached-0" Feb 27 17:46:49 crc kubenswrapper[4751]: I0227 17:46:49.727756 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-c06ebee7-0be9-47c1-829d-f905da8f6f6e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c06ebee7-0be9-47c1-829d-f905da8f6f6e\") pod \"openstack-galera-0\" (UID: \"2a4b1b5e-d595-4ff3-a99a-e13198440ef3\") " pod="openstack/openstack-galera-0" Feb 27 17:46:49 crc kubenswrapper[4751]: I0227 17:46:49.805642 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Feb 27 17:46:49 crc kubenswrapper[4751]: I0227 17:46:49.974325 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Feb 27 17:46:49 crc kubenswrapper[4751]: I0227 17:46:49.979556 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b5737b22-9f6b-4358-8078-ba61f4ecaa55","Type":"ContainerStarted","Data":"420bf1bc21a8744fa0f8d182fa935dfb129c39162a0f28aec21e75818238d60d"} Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.319905 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.321607 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.323158 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-c6dbp" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.330189 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.331100 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.331604 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.338471 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.349453 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.400924 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88052214-0232-41d1-979a-1d1b8a45d674-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.400971 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f0b1f2c0-7835-4fe0-bfe3-02b72248006d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0b1f2c0-7835-4fe0-bfe3-02b72248006d\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.401033 4751 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/88052214-0232-41d1-979a-1d1b8a45d674-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.401055 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/88052214-0232-41d1-979a-1d1b8a45d674-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.401076 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88052214-0232-41d1-979a-1d1b8a45d674-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.401089 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/88052214-0232-41d1-979a-1d1b8a45d674-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.401126 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/88052214-0232-41d1-979a-1d1b8a45d674-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.401148 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7tdkc\" (UniqueName: \"kubernetes.io/projected/88052214-0232-41d1-979a-1d1b8a45d674-kube-api-access-7tdkc\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.494265 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.502066 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88052214-0232-41d1-979a-1d1b8a45d674-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.502126 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/88052214-0232-41d1-979a-1d1b8a45d674-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.502210 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: 
\"kubernetes.io/configmap/88052214-0232-41d1-979a-1d1b8a45d674-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.502252 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7tdkc\" (UniqueName: \"kubernetes.io/projected/88052214-0232-41d1-979a-1d1b8a45d674-kube-api-access-7tdkc\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.502314 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88052214-0232-41d1-979a-1d1b8a45d674-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.502361 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f0b1f2c0-7835-4fe0-bfe3-02b72248006d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0b1f2c0-7835-4fe0-bfe3-02b72248006d\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.502490 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/88052214-0232-41d1-979a-1d1b8a45d674-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.502533 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/88052214-0232-41d1-979a-1d1b8a45d674-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.503322 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/88052214-0232-41d1-979a-1d1b8a45d674-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.503741 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/88052214-0232-41d1-979a-1d1b8a45d674-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.507618 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88052214-0232-41d1-979a-1d1b8a45d674-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.508473 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: 
\"kubernetes.io/configmap/88052214-0232-41d1-979a-1d1b8a45d674-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.509961 4751 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.509990 4751 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f0b1f2c0-7835-4fe0-bfe3-02b72248006d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0b1f2c0-7835-4fe0-bfe3-02b72248006d\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/ac26af0ca85bb1f0eb2176e733ee646367e8aafae54ba717d025ae42b6c4940f/globalmount\"" pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.513683 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88052214-0232-41d1-979a-1d1b8a45d674-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.514942 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/88052214-0232-41d1-979a-1d1b8a45d674-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: W0227 17:46:50.516588 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a4b1b5e_d595_4ff3_a99a_e13198440ef3.slice/crio-0828bebc066d8a3ef6fcd84e07764a88f5bf244cf6d01c35d157be7c91c61cd4 WatchSource:0}: Error finding container 0828bebc066d8a3ef6fcd84e07764a88f5bf244cf6d01c35d157be7c91c61cd4: Status 404 returned error can't find the container with id 0828bebc066d8a3ef6fcd84e07764a88f5bf244cf6d01c35d157be7c91c61cd4 Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.525450 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7tdkc\" (UniqueName: \"kubernetes.io/projected/88052214-0232-41d1-979a-1d1b8a45d674-kube-api-access-7tdkc\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.533318 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f0b1f2c0-7835-4fe0-bfe3-02b72248006d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f0b1f2c0-7835-4fe0-bfe3-02b72248006d\") pod \"openstack-cell1-galera-0\" (UID: \"88052214-0232-41d1-979a-1d1b8a45d674\") " pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.645710 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.986988 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"f830ff03-d833-4cb2-96fa-19216f5df45a","Type":"ContainerStarted","Data":"2c9322420eebfc9dc7d5ca6813494b2eafae7e69ddae8a23b959ab4cfe6b6f63"} Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.987326 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"f830ff03-d833-4cb2-96fa-19216f5df45a","Type":"ContainerStarted","Data":"186e7bed80408b990ad8f745d6d9b7f840fc37e852bd907b324e45fd3271c7fb"} Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.987594 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.991798 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"09b01bad-88e2-4cfc-874d-28c567cba1ea","Type":"ContainerStarted","Data":"dc475cbc1b9eb52496b1d4d1dc4c837f0fba67a9db6b1f01c8822f42f9f756c3"} Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.994082 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2a4b1b5e-d595-4ff3-a99a-e13198440ef3","Type":"ContainerStarted","Data":"e095a729fe8b2d0e8836450c03262b31d1ea1f50f50e3d0caf28bcfc5220f8f8"} Feb 27 17:46:50 crc kubenswrapper[4751]: I0227 17:46:50.994121 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2a4b1b5e-d595-4ff3-a99a-e13198440ef3","Type":"ContainerStarted","Data":"0828bebc066d8a3ef6fcd84e07764a88f5bf244cf6d01c35d157be7c91c61cd4"} Feb 27 17:46:51 crc kubenswrapper[4751]: I0227 17:46:51.013854 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=2.013834012 podStartE2EDuration="2.013834012s" podCreationTimestamp="2026-02-27 17:46:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 17:46:51.005166795 +0000 UTC m=+4973.152181242" watchObservedRunningTime="2026-02-27 17:46:51.013834012 +0000 UTC m=+4973.160848449" Feb 27 17:46:51 crc kubenswrapper[4751]: I0227 17:46:51.134961 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Feb 27 17:46:51 crc kubenswrapper[4751]: W0227 17:46:51.143602 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88052214_0232_41d1_979a_1d1b8a45d674.slice/crio-265b8a0a20690b405c45d47cacc26c64bffaba2a03b91d1c040852fa775dd80f WatchSource:0}: Error finding container 265b8a0a20690b405c45d47cacc26c64bffaba2a03b91d1c040852fa775dd80f: Status 404 returned error can't find the container with id 265b8a0a20690b405c45d47cacc26c64bffaba2a03b91d1c040852fa775dd80f Feb 27 17:46:52 crc kubenswrapper[4751]: I0227 17:46:52.006689 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"88052214-0232-41d1-979a-1d1b8a45d674","Type":"ContainerStarted","Data":"0905943c698aff77be3a5b7a543c1037c3c20fcd5399faf983cb5858731ce44b"} Feb 27 17:46:52 crc kubenswrapper[4751]: I0227 17:46:52.007067 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" 
event={"ID":"88052214-0232-41d1-979a-1d1b8a45d674","Type":"ContainerStarted","Data":"265b8a0a20690b405c45d47cacc26c64bffaba2a03b91d1c040852fa775dd80f"} Feb 27 17:46:52 crc kubenswrapper[4751]: I0227 17:46:52.165326 4751 scope.go:117] "RemoveContainer" containerID="2785287c5ebfd01517f402eaee5a9bd610daf9fc8bab8c8695da960a70e2b77c" Feb 27 17:46:52 crc kubenswrapper[4751]: E0227 17:46:52.523339 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:46:55 crc kubenswrapper[4751]: I0227 17:46:55.036839 4751 generic.go:334] "Generic (PLEG): container finished" podID="88052214-0232-41d1-979a-1d1b8a45d674" containerID="0905943c698aff77be3a5b7a543c1037c3c20fcd5399faf983cb5858731ce44b" exitCode=0 Feb 27 17:46:55 crc kubenswrapper[4751]: I0227 17:46:55.036917 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"88052214-0232-41d1-979a-1d1b8a45d674","Type":"ContainerDied","Data":"0905943c698aff77be3a5b7a543c1037c3c20fcd5399faf983cb5858731ce44b"} Feb 27 17:46:55 crc kubenswrapper[4751]: I0227 17:46:55.040462 4751 generic.go:334] "Generic (PLEG): container finished" podID="2a4b1b5e-d595-4ff3-a99a-e13198440ef3" containerID="e095a729fe8b2d0e8836450c03262b31d1ea1f50f50e3d0caf28bcfc5220f8f8" exitCode=0 Feb 27 17:46:55 crc kubenswrapper[4751]: I0227 17:46:55.040532 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2a4b1b5e-d595-4ff3-a99a-e13198440ef3","Type":"ContainerDied","Data":"e095a729fe8b2d0e8836450c03262b31d1ea1f50f50e3d0caf28bcfc5220f8f8"} Feb 27 17:46:56 crc kubenswrapper[4751]: I0227 17:46:56.053121 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"88052214-0232-41d1-979a-1d1b8a45d674","Type":"ContainerStarted","Data":"07f9e0ee6f991ce92444820a2b7db16918f894fff7254f9e275535ad6d16e0e0"} Feb 27 17:46:56 crc kubenswrapper[4751]: I0227 17:46:56.057038 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2a4b1b5e-d595-4ff3-a99a-e13198440ef3","Type":"ContainerStarted","Data":"b030d6cf1ead61932c1c6dddad3fdc155f2d2938d25196c14b4a9869ecca9b69"} Feb 27 17:46:56 crc kubenswrapper[4751]: I0227 17:46:56.106187 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=7.106148511 podStartE2EDuration="7.106148511s" podCreationTimestamp="2026-02-27 17:46:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 17:46:56.078512039 +0000 UTC m=+4978.225526526" watchObservedRunningTime="2026-02-27 17:46:56.106148511 +0000 UTC m=+4978.253162998" Feb 27 17:46:56 crc kubenswrapper[4751]: I0227 17:46:56.129929 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=9.129906372 podStartE2EDuration="9.129906372s" podCreationTimestamp="2026-02-27 17:46:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 17:46:56.122101738 +0000 UTC m=+4978.269116215" watchObservedRunningTime="2026-02-27 17:46:56.129906372 +0000 UTC 
m=+4978.276920829" Feb 27 17:46:56 crc kubenswrapper[4751]: I0227 17:46:56.499822 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5d7b5456f5-f8whw" Feb 27 17:46:56 crc kubenswrapper[4751]: I0227 17:46:56.730989 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-98ddfc8f-679g7" Feb 27 17:46:56 crc kubenswrapper[4751]: I0227 17:46:56.792131 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-f8whw"] Feb 27 17:46:57 crc kubenswrapper[4751]: I0227 17:46:57.062953 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5d7b5456f5-f8whw" podUID="bc279045-0353-4e5c-9be9-5f98561874e5" containerName="dnsmasq-dns" containerID="cri-o://6bda88ed98fdc446f4dd2b34df080121c254c3f1a9f8afb93a37dd01f3418968" gracePeriod=10 Feb 27 17:46:57 crc kubenswrapper[4751]: I0227 17:46:57.428531 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-f8whw" Feb 27 17:46:57 crc kubenswrapper[4751]: I0227 17:46:57.537022 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc279045-0353-4e5c-9be9-5f98561874e5-config\") pod \"bc279045-0353-4e5c-9be9-5f98561874e5\" (UID: \"bc279045-0353-4e5c-9be9-5f98561874e5\") " Feb 27 17:46:57 crc kubenswrapper[4751]: I0227 17:46:57.537097 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc279045-0353-4e5c-9be9-5f98561874e5-dns-svc\") pod \"bc279045-0353-4e5c-9be9-5f98561874e5\" (UID: \"bc279045-0353-4e5c-9be9-5f98561874e5\") " Feb 27 17:46:57 crc kubenswrapper[4751]: I0227 17:46:57.537161 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c7wcl\" (UniqueName: \"kubernetes.io/projected/bc279045-0353-4e5c-9be9-5f98561874e5-kube-api-access-c7wcl\") pod \"bc279045-0353-4e5c-9be9-5f98561874e5\" (UID: \"bc279045-0353-4e5c-9be9-5f98561874e5\") " Feb 27 17:46:57 crc kubenswrapper[4751]: I0227 17:46:57.555569 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc279045-0353-4e5c-9be9-5f98561874e5-kube-api-access-c7wcl" (OuterVolumeSpecName: "kube-api-access-c7wcl") pod "bc279045-0353-4e5c-9be9-5f98561874e5" (UID: "bc279045-0353-4e5c-9be9-5f98561874e5"). InnerVolumeSpecName "kube-api-access-c7wcl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:46:57 crc kubenswrapper[4751]: I0227 17:46:57.570566 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc279045-0353-4e5c-9be9-5f98561874e5-config" (OuterVolumeSpecName: "config") pod "bc279045-0353-4e5c-9be9-5f98561874e5" (UID: "bc279045-0353-4e5c-9be9-5f98561874e5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 17:46:57 crc kubenswrapper[4751]: I0227 17:46:57.576047 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc279045-0353-4e5c-9be9-5f98561874e5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bc279045-0353-4e5c-9be9-5f98561874e5" (UID: "bc279045-0353-4e5c-9be9-5f98561874e5"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 17:46:57 crc kubenswrapper[4751]: I0227 17:46:57.639426 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c7wcl\" (UniqueName: \"kubernetes.io/projected/bc279045-0353-4e5c-9be9-5f98561874e5-kube-api-access-c7wcl\") on node \"crc\" DevicePath \"\"" Feb 27 17:46:57 crc kubenswrapper[4751]: I0227 17:46:57.639469 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc279045-0353-4e5c-9be9-5f98561874e5-config\") on node \"crc\" DevicePath \"\"" Feb 27 17:46:57 crc kubenswrapper[4751]: I0227 17:46:57.639483 4751 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc279045-0353-4e5c-9be9-5f98561874e5-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 27 17:46:58 crc kubenswrapper[4751]: I0227 17:46:58.074306 4751 generic.go:334] "Generic (PLEG): container finished" podID="bc279045-0353-4e5c-9be9-5f98561874e5" containerID="6bda88ed98fdc446f4dd2b34df080121c254c3f1a9f8afb93a37dd01f3418968" exitCode=0 Feb 27 17:46:58 crc kubenswrapper[4751]: I0227 17:46:58.074375 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-f8whw" Feb 27 17:46:58 crc kubenswrapper[4751]: I0227 17:46:58.074362 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-f8whw" event={"ID":"bc279045-0353-4e5c-9be9-5f98561874e5","Type":"ContainerDied","Data":"6bda88ed98fdc446f4dd2b34df080121c254c3f1a9f8afb93a37dd01f3418968"} Feb 27 17:46:58 crc kubenswrapper[4751]: I0227 17:46:58.075714 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-f8whw" event={"ID":"bc279045-0353-4e5c-9be9-5f98561874e5","Type":"ContainerDied","Data":"7cc84720a26044a0b84144d5f67ae8f69f143d6e6f0110ef79cfa9e3e01332a4"} Feb 27 17:46:58 crc kubenswrapper[4751]: I0227 17:46:58.075737 4751 scope.go:117] "RemoveContainer" containerID="6bda88ed98fdc446f4dd2b34df080121c254c3f1a9f8afb93a37dd01f3418968" Feb 27 17:46:58 crc kubenswrapper[4751]: I0227 17:46:58.106615 4751 scope.go:117] "RemoveContainer" containerID="aaa50bc9911d942fdb51763ef1266d7e2989354fed03ddeb3a278b601e40a6d5" Feb 27 17:46:58 crc kubenswrapper[4751]: I0227 17:46:58.110861 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-f8whw"] Feb 27 17:46:58 crc kubenswrapper[4751]: I0227 17:46:58.117794 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-f8whw"] Feb 27 17:46:58 crc kubenswrapper[4751]: I0227 17:46:58.123966 4751 scope.go:117] "RemoveContainer" containerID="6bda88ed98fdc446f4dd2b34df080121c254c3f1a9f8afb93a37dd01f3418968" Feb 27 17:46:58 crc kubenswrapper[4751]: E0227 17:46:58.124357 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6bda88ed98fdc446f4dd2b34df080121c254c3f1a9f8afb93a37dd01f3418968\": container with ID starting with 6bda88ed98fdc446f4dd2b34df080121c254c3f1a9f8afb93a37dd01f3418968 not found: ID does not exist" containerID="6bda88ed98fdc446f4dd2b34df080121c254c3f1a9f8afb93a37dd01f3418968" Feb 27 17:46:58 crc kubenswrapper[4751]: I0227 17:46:58.124386 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6bda88ed98fdc446f4dd2b34df080121c254c3f1a9f8afb93a37dd01f3418968"} err="failed to get container status 
\"6bda88ed98fdc446f4dd2b34df080121c254c3f1a9f8afb93a37dd01f3418968\": rpc error: code = NotFound desc = could not find container \"6bda88ed98fdc446f4dd2b34df080121c254c3f1a9f8afb93a37dd01f3418968\": container with ID starting with 6bda88ed98fdc446f4dd2b34df080121c254c3f1a9f8afb93a37dd01f3418968 not found: ID does not exist" Feb 27 17:46:58 crc kubenswrapper[4751]: I0227 17:46:58.124431 4751 scope.go:117] "RemoveContainer" containerID="aaa50bc9911d942fdb51763ef1266d7e2989354fed03ddeb3a278b601e40a6d5" Feb 27 17:46:58 crc kubenswrapper[4751]: E0227 17:46:58.124720 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aaa50bc9911d942fdb51763ef1266d7e2989354fed03ddeb3a278b601e40a6d5\": container with ID starting with aaa50bc9911d942fdb51763ef1266d7e2989354fed03ddeb3a278b601e40a6d5 not found: ID does not exist" containerID="aaa50bc9911d942fdb51763ef1266d7e2989354fed03ddeb3a278b601e40a6d5" Feb 27 17:46:58 crc kubenswrapper[4751]: I0227 17:46:58.124741 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aaa50bc9911d942fdb51763ef1266d7e2989354fed03ddeb3a278b601e40a6d5"} err="failed to get container status \"aaa50bc9911d942fdb51763ef1266d7e2989354fed03ddeb3a278b601e40a6d5\": rpc error: code = NotFound desc = could not find container \"aaa50bc9911d942fdb51763ef1266d7e2989354fed03ddeb3a278b601e40a6d5\": container with ID starting with aaa50bc9911d942fdb51763ef1266d7e2989354fed03ddeb3a278b601e40a6d5 not found: ID does not exist" Feb 27 17:46:58 crc kubenswrapper[4751]: I0227 17:46:58.542709 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc279045-0353-4e5c-9be9-5f98561874e5" path="/var/lib/kubelet/pods/bc279045-0353-4e5c-9be9-5f98561874e5/volumes" Feb 27 17:46:59 crc kubenswrapper[4751]: I0227 17:46:59.808627 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Feb 27 17:46:59 crc kubenswrapper[4751]: I0227 17:46:59.974859 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Feb 27 17:46:59 crc kubenswrapper[4751]: I0227 17:46:59.975001 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Feb 27 17:47:00 crc kubenswrapper[4751]: I0227 17:47:00.099966 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Feb 27 17:47:00 crc kubenswrapper[4751]: I0227 17:47:00.206398 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Feb 27 17:47:00 crc kubenswrapper[4751]: I0227 17:47:00.647103 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Feb 27 17:47:00 crc kubenswrapper[4751]: I0227 17:47:00.647499 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Feb 27 17:47:01 crc kubenswrapper[4751]: E0227 17:47:01.524132 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-mmdln" podUID="028097b5-dd4b-4860-bbe4-aa3cb5a79c2c" Feb 27 17:47:03 crc kubenswrapper[4751]: I0227 17:47:03.140233 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openstack/openstack-cell1-galera-0" Feb 27 17:47:03 crc kubenswrapper[4751]: I0227 17:47:03.230656 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Feb 27 17:47:03 crc kubenswrapper[4751]: E0227 17:47:03.522980 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:47:07 crc kubenswrapper[4751]: I0227 17:47:07.709809 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-hpbtw"] Feb 27 17:47:07 crc kubenswrapper[4751]: E0227 17:47:07.710631 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc279045-0353-4e5c-9be9-5f98561874e5" containerName="dnsmasq-dns" Feb 27 17:47:07 crc kubenswrapper[4751]: I0227 17:47:07.710655 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc279045-0353-4e5c-9be9-5f98561874e5" containerName="dnsmasq-dns" Feb 27 17:47:07 crc kubenswrapper[4751]: E0227 17:47:07.710672 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc279045-0353-4e5c-9be9-5f98561874e5" containerName="init" Feb 27 17:47:07 crc kubenswrapper[4751]: I0227 17:47:07.710683 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc279045-0353-4e5c-9be9-5f98561874e5" containerName="init" Feb 27 17:47:07 crc kubenswrapper[4751]: I0227 17:47:07.710955 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc279045-0353-4e5c-9be9-5f98561874e5" containerName="dnsmasq-dns" Feb 27 17:47:07 crc kubenswrapper[4751]: I0227 17:47:07.711711 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-hpbtw" Feb 27 17:47:07 crc kubenswrapper[4751]: I0227 17:47:07.713660 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Feb 27 17:47:07 crc kubenswrapper[4751]: I0227 17:47:07.719321 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-hpbtw"] Feb 27 17:47:07 crc kubenswrapper[4751]: I0227 17:47:07.819198 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2df48f79-e4ea-45a7-a056-b014f90c42c9-operator-scripts\") pod \"root-account-create-update-hpbtw\" (UID: \"2df48f79-e4ea-45a7-a056-b014f90c42c9\") " pod="openstack/root-account-create-update-hpbtw" Feb 27 17:47:07 crc kubenswrapper[4751]: I0227 17:47:07.819276 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgzt6\" (UniqueName: \"kubernetes.io/projected/2df48f79-e4ea-45a7-a056-b014f90c42c9-kube-api-access-vgzt6\") pod \"root-account-create-update-hpbtw\" (UID: \"2df48f79-e4ea-45a7-a056-b014f90c42c9\") " pod="openstack/root-account-create-update-hpbtw" Feb 27 17:47:07 crc kubenswrapper[4751]: I0227 17:47:07.921470 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2df48f79-e4ea-45a7-a056-b014f90c42c9-operator-scripts\") pod \"root-account-create-update-hpbtw\" (UID: \"2df48f79-e4ea-45a7-a056-b014f90c42c9\") " pod="openstack/root-account-create-update-hpbtw" Feb 27 17:47:07 crc kubenswrapper[4751]: I0227 17:47:07.921601 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgzt6\" (UniqueName: \"kubernetes.io/projected/2df48f79-e4ea-45a7-a056-b014f90c42c9-kube-api-access-vgzt6\") pod \"root-account-create-update-hpbtw\" (UID: \"2df48f79-e4ea-45a7-a056-b014f90c42c9\") " pod="openstack/root-account-create-update-hpbtw" Feb 27 17:47:07 crc kubenswrapper[4751]: I0227 17:47:07.923318 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2df48f79-e4ea-45a7-a056-b014f90c42c9-operator-scripts\") pod \"root-account-create-update-hpbtw\" (UID: \"2df48f79-e4ea-45a7-a056-b014f90c42c9\") " pod="openstack/root-account-create-update-hpbtw" Feb 27 17:47:07 crc kubenswrapper[4751]: I0227 17:47:07.959111 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgzt6\" (UniqueName: \"kubernetes.io/projected/2df48f79-e4ea-45a7-a056-b014f90c42c9-kube-api-access-vgzt6\") pod \"root-account-create-update-hpbtw\" (UID: \"2df48f79-e4ea-45a7-a056-b014f90c42c9\") " pod="openstack/root-account-create-update-hpbtw" Feb 27 17:47:08 crc kubenswrapper[4751]: I0227 17:47:08.038712 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-hpbtw" Feb 27 17:47:08 crc kubenswrapper[4751]: I0227 17:47:08.564382 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-hpbtw"] Feb 27 17:47:08 crc kubenswrapper[4751]: W0227 17:47:08.571262 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2df48f79_e4ea_45a7_a056_b014f90c42c9.slice/crio-d478193fa10f1b84ab9e2796d1d8bb2e0c39460060e8a959d834b6abd6c93b90 WatchSource:0}: Error finding container d478193fa10f1b84ab9e2796d1d8bb2e0c39460060e8a959d834b6abd6c93b90: Status 404 returned error can't find the container with id d478193fa10f1b84ab9e2796d1d8bb2e0c39460060e8a959d834b6abd6c93b90 Feb 27 17:47:09 crc kubenswrapper[4751]: I0227 17:47:09.241874 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-hpbtw" event={"ID":"2df48f79-e4ea-45a7-a056-b014f90c42c9","Type":"ContainerStarted","Data":"7d3519e3f23106ad8a4119f3fd28686c8918f7ca6ab5db2724aa03674f071e31"} Feb 27 17:47:09 crc kubenswrapper[4751]: I0227 17:47:09.242238 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-hpbtw" event={"ID":"2df48f79-e4ea-45a7-a056-b014f90c42c9","Type":"ContainerStarted","Data":"d478193fa10f1b84ab9e2796d1d8bb2e0c39460060e8a959d834b6abd6c93b90"} Feb 27 17:47:09 crc kubenswrapper[4751]: I0227 17:47:09.266240 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-hpbtw" podStartSLOduration=2.266218705 podStartE2EDuration="2.266218705s" podCreationTimestamp="2026-02-27 17:47:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 17:47:09.259004367 +0000 UTC m=+4991.406018814" watchObservedRunningTime="2026-02-27 17:47:09.266218705 +0000 UTC m=+4991.413233152" Feb 27 17:47:10 crc kubenswrapper[4751]: I0227 17:47:10.252571 4751 generic.go:334] "Generic (PLEG): container finished" podID="2df48f79-e4ea-45a7-a056-b014f90c42c9" containerID="7d3519e3f23106ad8a4119f3fd28686c8918f7ca6ab5db2724aa03674f071e31" exitCode=0 Feb 27 17:47:10 crc kubenswrapper[4751]: I0227 17:47:10.252825 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-hpbtw" event={"ID":"2df48f79-e4ea-45a7-a056-b014f90c42c9","Type":"ContainerDied","Data":"7d3519e3f23106ad8a4119f3fd28686c8918f7ca6ab5db2724aa03674f071e31"} Feb 27 17:47:11 crc kubenswrapper[4751]: I0227 17:47:11.622828 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-hpbtw" Feb 27 17:47:11 crc kubenswrapper[4751]: I0227 17:47:11.689138 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2df48f79-e4ea-45a7-a056-b014f90c42c9-operator-scripts\") pod \"2df48f79-e4ea-45a7-a056-b014f90c42c9\" (UID: \"2df48f79-e4ea-45a7-a056-b014f90c42c9\") " Feb 27 17:47:11 crc kubenswrapper[4751]: I0227 17:47:11.689710 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vgzt6\" (UniqueName: \"kubernetes.io/projected/2df48f79-e4ea-45a7-a056-b014f90c42c9-kube-api-access-vgzt6\") pod \"2df48f79-e4ea-45a7-a056-b014f90c42c9\" (UID: \"2df48f79-e4ea-45a7-a056-b014f90c42c9\") " Feb 27 17:47:11 crc kubenswrapper[4751]: I0227 17:47:11.690212 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2df48f79-e4ea-45a7-a056-b014f90c42c9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2df48f79-e4ea-45a7-a056-b014f90c42c9" (UID: "2df48f79-e4ea-45a7-a056-b014f90c42c9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 17:47:11 crc kubenswrapper[4751]: I0227 17:47:11.696582 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2df48f79-e4ea-45a7-a056-b014f90c42c9-kube-api-access-vgzt6" (OuterVolumeSpecName: "kube-api-access-vgzt6") pod "2df48f79-e4ea-45a7-a056-b014f90c42c9" (UID: "2df48f79-e4ea-45a7-a056-b014f90c42c9"). InnerVolumeSpecName "kube-api-access-vgzt6". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:47:11 crc kubenswrapper[4751]: I0227 17:47:11.792027 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vgzt6\" (UniqueName: \"kubernetes.io/projected/2df48f79-e4ea-45a7-a056-b014f90c42c9-kube-api-access-vgzt6\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:11 crc kubenswrapper[4751]: I0227 17:47:11.792084 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2df48f79-e4ea-45a7-a056-b014f90c42c9-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:12 crc kubenswrapper[4751]: I0227 17:47:12.273196 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-hpbtw" event={"ID":"2df48f79-e4ea-45a7-a056-b014f90c42c9","Type":"ContainerDied","Data":"d478193fa10f1b84ab9e2796d1d8bb2e0c39460060e8a959d834b6abd6c93b90"} Feb 27 17:47:12 crc kubenswrapper[4751]: I0227 17:47:12.273251 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d478193fa10f1b84ab9e2796d1d8bb2e0c39460060e8a959d834b6abd6c93b90" Feb 27 17:47:12 crc kubenswrapper[4751]: I0227 17:47:12.273309 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-hpbtw" Feb 27 17:47:14 crc kubenswrapper[4751]: I0227 17:47:14.334264 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-hpbtw"] Feb 27 17:47:14 crc kubenswrapper[4751]: I0227 17:47:14.346329 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-hpbtw"] Feb 27 17:47:14 crc kubenswrapper[4751]: I0227 17:47:14.540255 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2df48f79-e4ea-45a7-a056-b014f90c42c9" path="/var/lib/kubelet/pods/2df48f79-e4ea-45a7-a056-b014f90c42c9/volumes" Feb 27 17:47:16 crc kubenswrapper[4751]: E0227 17:47:16.162629 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/redhat-marketplace-index@sha256=e848a00af7690cfa41500b98e0e7a0b9738ce0af7b6b4fee3ea20e0838523c30/signature-2: status 500 (Internal Server Error)" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Feb 27 17:47:16 crc kubenswrapper[4751]: E0227 17:47:16.162960 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kczvp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-mmdln_openshift-marketplace(028097b5-dd4b-4860-bbe4-aa3cb5a79c2c): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/redhat-marketplace-index@sha256=e848a00af7690cfa41500b98e0e7a0b9738ce0af7b6b4fee3ea20e0838523c30/signature-2: status 500 (Internal Server Error)" logger="UnhandledError" Feb 27 17:47:16 crc kubenswrapper[4751]: E0227 17:47:16.164197 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from 
https://registry.redhat.io/containers/sigstore/redhat/redhat-marketplace-index@sha256=e848a00af7690cfa41500b98e0e7a0b9738ce0af7b6b4fee3ea20e0838523c30/signature-2: status 500 (Internal Server Error)\"" pod="openshift-marketplace/redhat-marketplace-mmdln" podUID="028097b5-dd4b-4860-bbe4-aa3cb5a79c2c" Feb 27 17:47:18 crc kubenswrapper[4751]: E0227 17:47:18.531967 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:47:19 crc kubenswrapper[4751]: I0227 17:47:19.327346 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-zxkc7"] Feb 27 17:47:19 crc kubenswrapper[4751]: E0227 17:47:19.327684 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2df48f79-e4ea-45a7-a056-b014f90c42c9" containerName="mariadb-account-create-update" Feb 27 17:47:19 crc kubenswrapper[4751]: I0227 17:47:19.327700 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="2df48f79-e4ea-45a7-a056-b014f90c42c9" containerName="mariadb-account-create-update" Feb 27 17:47:19 crc kubenswrapper[4751]: I0227 17:47:19.327829 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="2df48f79-e4ea-45a7-a056-b014f90c42c9" containerName="mariadb-account-create-update" Feb 27 17:47:19 crc kubenswrapper[4751]: I0227 17:47:19.328296 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zxkc7" Feb 27 17:47:19 crc kubenswrapper[4751]: I0227 17:47:19.330892 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Feb 27 17:47:19 crc kubenswrapper[4751]: I0227 17:47:19.352052 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-zxkc7"] Feb 27 17:47:19 crc kubenswrapper[4751]: I0227 17:47:19.429263 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdd62\" (UniqueName: \"kubernetes.io/projected/d766e975-f2e6-4733-aaea-d3725ec03ec2-kube-api-access-hdd62\") pod \"root-account-create-update-zxkc7\" (UID: \"d766e975-f2e6-4733-aaea-d3725ec03ec2\") " pod="openstack/root-account-create-update-zxkc7" Feb 27 17:47:19 crc kubenswrapper[4751]: I0227 17:47:19.429459 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d766e975-f2e6-4733-aaea-d3725ec03ec2-operator-scripts\") pod \"root-account-create-update-zxkc7\" (UID: \"d766e975-f2e6-4733-aaea-d3725ec03ec2\") " pod="openstack/root-account-create-update-zxkc7" Feb 27 17:47:19 crc kubenswrapper[4751]: I0227 17:47:19.530730 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d766e975-f2e6-4733-aaea-d3725ec03ec2-operator-scripts\") pod \"root-account-create-update-zxkc7\" (UID: \"d766e975-f2e6-4733-aaea-d3725ec03ec2\") " pod="openstack/root-account-create-update-zxkc7" Feb 27 17:47:19 crc kubenswrapper[4751]: I0227 17:47:19.530877 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdd62\" (UniqueName: \"kubernetes.io/projected/d766e975-f2e6-4733-aaea-d3725ec03ec2-kube-api-access-hdd62\") pod 
\"root-account-create-update-zxkc7\" (UID: \"d766e975-f2e6-4733-aaea-d3725ec03ec2\") " pod="openstack/root-account-create-update-zxkc7" Feb 27 17:47:19 crc kubenswrapper[4751]: I0227 17:47:19.532094 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d766e975-f2e6-4733-aaea-d3725ec03ec2-operator-scripts\") pod \"root-account-create-update-zxkc7\" (UID: \"d766e975-f2e6-4733-aaea-d3725ec03ec2\") " pod="openstack/root-account-create-update-zxkc7" Feb 27 17:47:19 crc kubenswrapper[4751]: I0227 17:47:19.564767 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdd62\" (UniqueName: \"kubernetes.io/projected/d766e975-f2e6-4733-aaea-d3725ec03ec2-kube-api-access-hdd62\") pod \"root-account-create-update-zxkc7\" (UID: \"d766e975-f2e6-4733-aaea-d3725ec03ec2\") " pod="openstack/root-account-create-update-zxkc7" Feb 27 17:47:19 crc kubenswrapper[4751]: I0227 17:47:19.714331 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zxkc7" Feb 27 17:47:20 crc kubenswrapper[4751]: I0227 17:47:20.244521 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-zxkc7"] Feb 27 17:47:20 crc kubenswrapper[4751]: W0227 17:47:20.413476 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd766e975_f2e6_4733_aaea_d3725ec03ec2.slice/crio-c523a0506aac70319b9d38f0df3e44d07260a3254ae51ec98d761768b23371a0 WatchSource:0}: Error finding container c523a0506aac70319b9d38f0df3e44d07260a3254ae51ec98d761768b23371a0: Status 404 returned error can't find the container with id c523a0506aac70319b9d38f0df3e44d07260a3254ae51ec98d761768b23371a0 Feb 27 17:47:21 crc kubenswrapper[4751]: I0227 17:47:21.366319 4751 generic.go:334] "Generic (PLEG): container finished" podID="b5737b22-9f6b-4358-8078-ba61f4ecaa55" containerID="420bf1bc21a8744fa0f8d182fa935dfb129c39162a0f28aec21e75818238d60d" exitCode=0 Feb 27 17:47:21 crc kubenswrapper[4751]: I0227 17:47:21.366447 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b5737b22-9f6b-4358-8078-ba61f4ecaa55","Type":"ContainerDied","Data":"420bf1bc21a8744fa0f8d182fa935dfb129c39162a0f28aec21e75818238d60d"} Feb 27 17:47:21 crc kubenswrapper[4751]: I0227 17:47:21.368429 4751 generic.go:334] "Generic (PLEG): container finished" podID="d766e975-f2e6-4733-aaea-d3725ec03ec2" containerID="47768b7f4071f5f4ba301d2c2afc27d314d158aa4007096a04ca21761cbb9d09" exitCode=0 Feb 27 17:47:21 crc kubenswrapper[4751]: I0227 17:47:21.368469 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-zxkc7" event={"ID":"d766e975-f2e6-4733-aaea-d3725ec03ec2","Type":"ContainerDied","Data":"47768b7f4071f5f4ba301d2c2afc27d314d158aa4007096a04ca21761cbb9d09"} Feb 27 17:47:21 crc kubenswrapper[4751]: I0227 17:47:21.368496 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-zxkc7" event={"ID":"d766e975-f2e6-4733-aaea-d3725ec03ec2","Type":"ContainerStarted","Data":"c523a0506aac70319b9d38f0df3e44d07260a3254ae51ec98d761768b23371a0"} Feb 27 17:47:22 crc kubenswrapper[4751]: I0227 17:47:22.381485 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" 
event={"ID":"b5737b22-9f6b-4358-8078-ba61f4ecaa55","Type":"ContainerStarted","Data":"da0b6a80eb583e00165de033781bbf16abf64a4b73735c475613ff94b430e47f"} Feb 27 17:47:22 crc kubenswrapper[4751]: I0227 17:47:22.382693 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Feb 27 17:47:22 crc kubenswrapper[4751]: I0227 17:47:22.422668 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=36.422628902 podStartE2EDuration="36.422628902s" podCreationTimestamp="2026-02-27 17:46:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 17:47:22.410709171 +0000 UTC m=+5004.557723678" watchObservedRunningTime="2026-02-27 17:47:22.422628902 +0000 UTC m=+5004.569643399" Feb 27 17:47:22 crc kubenswrapper[4751]: I0227 17:47:22.708371 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zxkc7" Feb 27 17:47:22 crc kubenswrapper[4751]: I0227 17:47:22.800509 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d766e975-f2e6-4733-aaea-d3725ec03ec2-operator-scripts\") pod \"d766e975-f2e6-4733-aaea-d3725ec03ec2\" (UID: \"d766e975-f2e6-4733-aaea-d3725ec03ec2\") " Feb 27 17:47:22 crc kubenswrapper[4751]: I0227 17:47:22.800796 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hdd62\" (UniqueName: \"kubernetes.io/projected/d766e975-f2e6-4733-aaea-d3725ec03ec2-kube-api-access-hdd62\") pod \"d766e975-f2e6-4733-aaea-d3725ec03ec2\" (UID: \"d766e975-f2e6-4733-aaea-d3725ec03ec2\") " Feb 27 17:47:22 crc kubenswrapper[4751]: I0227 17:47:22.800991 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d766e975-f2e6-4733-aaea-d3725ec03ec2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d766e975-f2e6-4733-aaea-d3725ec03ec2" (UID: "d766e975-f2e6-4733-aaea-d3725ec03ec2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 17:47:22 crc kubenswrapper[4751]: I0227 17:47:22.801420 4751 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d766e975-f2e6-4733-aaea-d3725ec03ec2-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:22 crc kubenswrapper[4751]: I0227 17:47:22.809134 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d766e975-f2e6-4733-aaea-d3725ec03ec2-kube-api-access-hdd62" (OuterVolumeSpecName: "kube-api-access-hdd62") pod "d766e975-f2e6-4733-aaea-d3725ec03ec2" (UID: "d766e975-f2e6-4733-aaea-d3725ec03ec2"). InnerVolumeSpecName "kube-api-access-hdd62". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:47:22 crc kubenswrapper[4751]: I0227 17:47:22.903290 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hdd62\" (UniqueName: \"kubernetes.io/projected/d766e975-f2e6-4733-aaea-d3725ec03ec2-kube-api-access-hdd62\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:23 crc kubenswrapper[4751]: I0227 17:47:23.390123 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-zxkc7" Feb 27 17:47:23 crc kubenswrapper[4751]: I0227 17:47:23.390144 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-zxkc7" event={"ID":"d766e975-f2e6-4733-aaea-d3725ec03ec2","Type":"ContainerDied","Data":"c523a0506aac70319b9d38f0df3e44d07260a3254ae51ec98d761768b23371a0"} Feb 27 17:47:23 crc kubenswrapper[4751]: I0227 17:47:23.390200 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c523a0506aac70319b9d38f0df3e44d07260a3254ae51ec98d761768b23371a0" Feb 27 17:47:23 crc kubenswrapper[4751]: I0227 17:47:23.391837 4751 generic.go:334] "Generic (PLEG): container finished" podID="09b01bad-88e2-4cfc-874d-28c567cba1ea" containerID="dc475cbc1b9eb52496b1d4d1dc4c837f0fba67a9db6b1f01c8822f42f9f756c3" exitCode=0 Feb 27 17:47:23 crc kubenswrapper[4751]: I0227 17:47:23.391869 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"09b01bad-88e2-4cfc-874d-28c567cba1ea","Type":"ContainerDied","Data":"dc475cbc1b9eb52496b1d4d1dc4c837f0fba67a9db6b1f01c8822f42f9f756c3"} Feb 27 17:47:24 crc kubenswrapper[4751]: I0227 17:47:24.404106 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"09b01bad-88e2-4cfc-874d-28c567cba1ea","Type":"ContainerStarted","Data":"5fecee14fa06510ffec1efa4402a61cac2aa9d0b49cb625e8ec7b50d71320721"} Feb 27 17:47:24 crc kubenswrapper[4751]: I0227 17:47:24.404810 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:24 crc kubenswrapper[4751]: I0227 17:47:24.442364 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=38.442337199 podStartE2EDuration="38.442337199s" podCreationTimestamp="2026-02-27 17:46:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 17:47:24.431383572 +0000 UTC m=+5006.578398049" watchObservedRunningTime="2026-02-27 17:47:24.442337199 +0000 UTC m=+5006.589351686" Feb 27 17:47:29 crc kubenswrapper[4751]: E0227 17:47:29.524435 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:47:30 crc kubenswrapper[4751]: E0227 17:47:30.523148 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-mmdln" podUID="028097b5-dd4b-4860-bbe4-aa3cb5a79c2c" Feb 27 17:47:37 crc kubenswrapper[4751]: I0227 17:47:37.657630 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Feb 27 17:47:37 crc kubenswrapper[4751]: I0227 17:47:37.927683 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:40 crc kubenswrapper[4751]: E0227 17:47:40.524005 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image 
\\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:47:42 crc kubenswrapper[4751]: E0227 17:47:42.523981 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-mmdln" podUID="028097b5-dd4b-4860-bbe4-aa3cb5a79c2c" Feb 27 17:47:44 crc kubenswrapper[4751]: I0227 17:47:44.103862 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-5n9cf"] Feb 27 17:47:44 crc kubenswrapper[4751]: E0227 17:47:44.104265 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d766e975-f2e6-4733-aaea-d3725ec03ec2" containerName="mariadb-account-create-update" Feb 27 17:47:44 crc kubenswrapper[4751]: I0227 17:47:44.104281 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="d766e975-f2e6-4733-aaea-d3725ec03ec2" containerName="mariadb-account-create-update" Feb 27 17:47:44 crc kubenswrapper[4751]: I0227 17:47:44.104542 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="d766e975-f2e6-4733-aaea-d3725ec03ec2" containerName="mariadb-account-create-update" Feb 27 17:47:44 crc kubenswrapper[4751]: I0227 17:47:44.105566 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b7946d7b9-5n9cf" Feb 27 17:47:44 crc kubenswrapper[4751]: I0227 17:47:44.127210 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-5n9cf"] Feb 27 17:47:44 crc kubenswrapper[4751]: I0227 17:47:44.238182 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e1a4b82-5d0d-40aa-98cf-8449a543be92-config\") pod \"dnsmasq-dns-5b7946d7b9-5n9cf\" (UID: \"5e1a4b82-5d0d-40aa-98cf-8449a543be92\") " pod="openstack/dnsmasq-dns-5b7946d7b9-5n9cf" Feb 27 17:47:44 crc kubenswrapper[4751]: I0227 17:47:44.238281 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5e1a4b82-5d0d-40aa-98cf-8449a543be92-dns-svc\") pod \"dnsmasq-dns-5b7946d7b9-5n9cf\" (UID: \"5e1a4b82-5d0d-40aa-98cf-8449a543be92\") " pod="openstack/dnsmasq-dns-5b7946d7b9-5n9cf" Feb 27 17:47:44 crc kubenswrapper[4751]: I0227 17:47:44.238326 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttgvg\" (UniqueName: \"kubernetes.io/projected/5e1a4b82-5d0d-40aa-98cf-8449a543be92-kube-api-access-ttgvg\") pod \"dnsmasq-dns-5b7946d7b9-5n9cf\" (UID: \"5e1a4b82-5d0d-40aa-98cf-8449a543be92\") " pod="openstack/dnsmasq-dns-5b7946d7b9-5n9cf" Feb 27 17:47:44 crc kubenswrapper[4751]: I0227 17:47:44.339581 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e1a4b82-5d0d-40aa-98cf-8449a543be92-config\") pod \"dnsmasq-dns-5b7946d7b9-5n9cf\" (UID: \"5e1a4b82-5d0d-40aa-98cf-8449a543be92\") " pod="openstack/dnsmasq-dns-5b7946d7b9-5n9cf" Feb 27 17:47:44 crc kubenswrapper[4751]: I0227 17:47:44.339668 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5e1a4b82-5d0d-40aa-98cf-8449a543be92-dns-svc\") pod 
\"dnsmasq-dns-5b7946d7b9-5n9cf\" (UID: \"5e1a4b82-5d0d-40aa-98cf-8449a543be92\") " pod="openstack/dnsmasq-dns-5b7946d7b9-5n9cf" Feb 27 17:47:44 crc kubenswrapper[4751]: I0227 17:47:44.339695 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttgvg\" (UniqueName: \"kubernetes.io/projected/5e1a4b82-5d0d-40aa-98cf-8449a543be92-kube-api-access-ttgvg\") pod \"dnsmasq-dns-5b7946d7b9-5n9cf\" (UID: \"5e1a4b82-5d0d-40aa-98cf-8449a543be92\") " pod="openstack/dnsmasq-dns-5b7946d7b9-5n9cf" Feb 27 17:47:44 crc kubenswrapper[4751]: I0227 17:47:44.340980 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e1a4b82-5d0d-40aa-98cf-8449a543be92-config\") pod \"dnsmasq-dns-5b7946d7b9-5n9cf\" (UID: \"5e1a4b82-5d0d-40aa-98cf-8449a543be92\") " pod="openstack/dnsmasq-dns-5b7946d7b9-5n9cf" Feb 27 17:47:44 crc kubenswrapper[4751]: I0227 17:47:44.341994 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5e1a4b82-5d0d-40aa-98cf-8449a543be92-dns-svc\") pod \"dnsmasq-dns-5b7946d7b9-5n9cf\" (UID: \"5e1a4b82-5d0d-40aa-98cf-8449a543be92\") " pod="openstack/dnsmasq-dns-5b7946d7b9-5n9cf" Feb 27 17:47:44 crc kubenswrapper[4751]: I0227 17:47:44.356721 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttgvg\" (UniqueName: \"kubernetes.io/projected/5e1a4b82-5d0d-40aa-98cf-8449a543be92-kube-api-access-ttgvg\") pod \"dnsmasq-dns-5b7946d7b9-5n9cf\" (UID: \"5e1a4b82-5d0d-40aa-98cf-8449a543be92\") " pod="openstack/dnsmasq-dns-5b7946d7b9-5n9cf" Feb 27 17:47:44 crc kubenswrapper[4751]: I0227 17:47:44.423821 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b7946d7b9-5n9cf" Feb 27 17:47:46 crc kubenswrapper[4751]: E0227 17:47:46.048611 4751 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="1.527s" Feb 27 17:47:46 crc kubenswrapper[4751]: I0227 17:47:46.110937 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-5n9cf"] Feb 27 17:47:46 crc kubenswrapper[4751]: I0227 17:47:46.555631 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 27 17:47:47 crc kubenswrapper[4751]: I0227 17:47:47.073850 4751 generic.go:334] "Generic (PLEG): container finished" podID="5e1a4b82-5d0d-40aa-98cf-8449a543be92" containerID="8d4ea59b5b4b08fdd652e91a37500fd2afbf7ba4c6b30e4fa3b5880986c5c161" exitCode=0 Feb 27 17:47:47 crc kubenswrapper[4751]: I0227 17:47:47.075020 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-5n9cf" event={"ID":"5e1a4b82-5d0d-40aa-98cf-8449a543be92","Type":"ContainerDied","Data":"8d4ea59b5b4b08fdd652e91a37500fd2afbf7ba4c6b30e4fa3b5880986c5c161"} Feb 27 17:47:47 crc kubenswrapper[4751]: I0227 17:47:47.075099 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-5n9cf" event={"ID":"5e1a4b82-5d0d-40aa-98cf-8449a543be92","Type":"ContainerStarted","Data":"78dd0a5ca6f9b8487e8294c3aa898c35643dc6ec1b4eba0a6727c7a99b8d634b"} Feb 27 17:47:47 crc kubenswrapper[4751]: I0227 17:47:47.307128 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 27 17:47:48 crc kubenswrapper[4751]: I0227 17:47:48.083746 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-5n9cf" 
event={"ID":"5e1a4b82-5d0d-40aa-98cf-8449a543be92","Type":"ContainerStarted","Data":"ec252f4518dfb0c9aea95963c78a48394f6f5caa6c65fb9f73ab7509d5aa1022"} Feb 27 17:47:48 crc kubenswrapper[4751]: I0227 17:47:48.084922 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b7946d7b9-5n9cf" Feb 27 17:47:48 crc kubenswrapper[4751]: I0227 17:47:48.106632 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b7946d7b9-5n9cf" podStartSLOduration=4.106610994 podStartE2EDuration="4.106610994s" podCreationTimestamp="2026-02-27 17:47:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 17:47:48.100762201 +0000 UTC m=+5030.247776658" watchObservedRunningTime="2026-02-27 17:47:48.106610994 +0000 UTC m=+5030.253625441" Feb 27 17:47:48 crc kubenswrapper[4751]: I0227 17:47:48.369085 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="b5737b22-9f6b-4358-8078-ba61f4ecaa55" containerName="rabbitmq" containerID="cri-o://da0b6a80eb583e00165de033781bbf16abf64a4b73735c475613ff94b430e47f" gracePeriod=604799 Feb 27 17:47:48 crc kubenswrapper[4751]: I0227 17:47:48.965036 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="09b01bad-88e2-4cfc-874d-28c567cba1ea" containerName="rabbitmq" containerID="cri-o://5fecee14fa06510ffec1efa4402a61cac2aa9d0b49cb625e8ec7b50d71320721" gracePeriod=604799 Feb 27 17:47:52 crc kubenswrapper[4751]: E0227 17:47:52.526023 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:47:54 crc kubenswrapper[4751]: I0227 17:47:54.426740 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5b7946d7b9-5n9cf" Feb 27 17:47:54 crc kubenswrapper[4751]: I0227 17:47:54.507389 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-679g7"] Feb 27 17:47:54 crc kubenswrapper[4751]: I0227 17:47:54.515766 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-98ddfc8f-679g7" podUID="3f97612c-7a8b-4e2c-93c0-02889ef80459" containerName="dnsmasq-dns" containerID="cri-o://3d9281e9d4590e4c262ce766931e2e7851dbc3b506e81b43a42879714c1437ae" gracePeriod=10 Feb 27 17:47:54 crc kubenswrapper[4751]: I0227 17:47:54.953396 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Feb 27 17:47:54 crc kubenswrapper[4751]: I0227 17:47:54.960055 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-679g7" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.101639 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f97612c-7a8b-4e2c-93c0-02889ef80459-config\") pod \"3f97612c-7a8b-4e2c-93c0-02889ef80459\" (UID: \"3f97612c-7a8b-4e2c-93c0-02889ef80459\") " Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.101698 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b5737b22-9f6b-4358-8078-ba61f4ecaa55-rabbitmq-plugins\") pod \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.101753 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b5737b22-9f6b-4358-8078-ba61f4ecaa55-erlang-cookie-secret\") pod \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.101875 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39b18a24-169e-435c-baa3-bfd95e5cf125\") pod \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.101922 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b5737b22-9f6b-4358-8078-ba61f4ecaa55-rabbitmq-confd\") pod \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.101945 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b5737b22-9f6b-4358-8078-ba61f4ecaa55-server-conf\") pod \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.101984 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b5737b22-9f6b-4358-8078-ba61f4ecaa55-pod-info\") pod \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.102024 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3f97612c-7a8b-4e2c-93c0-02889ef80459-dns-svc\") pod \"3f97612c-7a8b-4e2c-93c0-02889ef80459\" (UID: \"3f97612c-7a8b-4e2c-93c0-02889ef80459\") " Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.102102 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b5737b22-9f6b-4358-8078-ba61f4ecaa55-rabbitmq-erlang-cookie\") pod \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.102143 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vdkwm\" (UniqueName: \"kubernetes.io/projected/3f97612c-7a8b-4e2c-93c0-02889ef80459-kube-api-access-vdkwm\") pod 
\"3f97612c-7a8b-4e2c-93c0-02889ef80459\" (UID: \"3f97612c-7a8b-4e2c-93c0-02889ef80459\") " Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.102202 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-92mxv\" (UniqueName: \"kubernetes.io/projected/b5737b22-9f6b-4358-8078-ba61f4ecaa55-kube-api-access-92mxv\") pod \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.102262 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b5737b22-9f6b-4358-8078-ba61f4ecaa55-plugins-conf\") pod \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\" (UID: \"b5737b22-9f6b-4358-8078-ba61f4ecaa55\") " Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.103801 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b5737b22-9f6b-4358-8078-ba61f4ecaa55-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "b5737b22-9f6b-4358-8078-ba61f4ecaa55" (UID: "b5737b22-9f6b-4358-8078-ba61f4ecaa55"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.104302 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5737b22-9f6b-4358-8078-ba61f4ecaa55-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "b5737b22-9f6b-4358-8078-ba61f4ecaa55" (UID: "b5737b22-9f6b-4358-8078-ba61f4ecaa55"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.104512 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5737b22-9f6b-4358-8078-ba61f4ecaa55-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "b5737b22-9f6b-4358-8078-ba61f4ecaa55" (UID: "b5737b22-9f6b-4358-8078-ba61f4ecaa55"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.107280 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5737b22-9f6b-4358-8078-ba61f4ecaa55-kube-api-access-92mxv" (OuterVolumeSpecName: "kube-api-access-92mxv") pod "b5737b22-9f6b-4358-8078-ba61f4ecaa55" (UID: "b5737b22-9f6b-4358-8078-ba61f4ecaa55"). InnerVolumeSpecName "kube-api-access-92mxv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.107792 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5737b22-9f6b-4358-8078-ba61f4ecaa55-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "b5737b22-9f6b-4358-8078-ba61f4ecaa55" (UID: "b5737b22-9f6b-4358-8078-ba61f4ecaa55"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.108169 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/b5737b22-9f6b-4358-8078-ba61f4ecaa55-pod-info" (OuterVolumeSpecName: "pod-info") pod "b5737b22-9f6b-4358-8078-ba61f4ecaa55" (UID: "b5737b22-9f6b-4358-8078-ba61f4ecaa55"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.116677 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f97612c-7a8b-4e2c-93c0-02889ef80459-kube-api-access-vdkwm" (OuterVolumeSpecName: "kube-api-access-vdkwm") pod "3f97612c-7a8b-4e2c-93c0-02889ef80459" (UID: "3f97612c-7a8b-4e2c-93c0-02889ef80459"). InnerVolumeSpecName "kube-api-access-vdkwm". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.122729 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39b18a24-169e-435c-baa3-bfd95e5cf125" (OuterVolumeSpecName: "persistence") pod "b5737b22-9f6b-4358-8078-ba61f4ecaa55" (UID: "b5737b22-9f6b-4358-8078-ba61f4ecaa55"). InnerVolumeSpecName "pvc-39b18a24-169e-435c-baa3-bfd95e5cf125". PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.127875 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b5737b22-9f6b-4358-8078-ba61f4ecaa55-server-conf" (OuterVolumeSpecName: "server-conf") pod "b5737b22-9f6b-4358-8078-ba61f4ecaa55" (UID: "b5737b22-9f6b-4358-8078-ba61f4ecaa55"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.145618 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f97612c-7a8b-4e2c-93c0-02889ef80459-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3f97612c-7a8b-4e2c-93c0-02889ef80459" (UID: "3f97612c-7a8b-4e2c-93c0-02889ef80459"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.156595 4751 generic.go:334] "Generic (PLEG): container finished" podID="b5737b22-9f6b-4358-8078-ba61f4ecaa55" containerID="da0b6a80eb583e00165de033781bbf16abf64a4b73735c475613ff94b430e47f" exitCode=0 Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.156659 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b5737b22-9f6b-4358-8078-ba61f4ecaa55","Type":"ContainerDied","Data":"da0b6a80eb583e00165de033781bbf16abf64a4b73735c475613ff94b430e47f"} Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.156688 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b5737b22-9f6b-4358-8078-ba61f4ecaa55","Type":"ContainerDied","Data":"0defa6a50b142148f1c9513c200453509a978c473e743bccf691baf82660f744"} Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.156704 4751 scope.go:117] "RemoveContainer" containerID="da0b6a80eb583e00165de033781bbf16abf64a4b73735c475613ff94b430e47f" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.156806 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.160073 4751 generic.go:334] "Generic (PLEG): container finished" podID="3f97612c-7a8b-4e2c-93c0-02889ef80459" containerID="3d9281e9d4590e4c262ce766931e2e7851dbc3b506e81b43a42879714c1437ae" exitCode=0 Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.160097 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-679g7" event={"ID":"3f97612c-7a8b-4e2c-93c0-02889ef80459","Type":"ContainerDied","Data":"3d9281e9d4590e4c262ce766931e2e7851dbc3b506e81b43a42879714c1437ae"} Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.160113 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-679g7" event={"ID":"3f97612c-7a8b-4e2c-93c0-02889ef80459","Type":"ContainerDied","Data":"85a68c3c120bc14fc2572a2a10b61c7bcb383238e95d5d7426ecdb98a09c667c"} Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.160151 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-679g7" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.170353 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f97612c-7a8b-4e2c-93c0-02889ef80459-config" (OuterVolumeSpecName: "config") pod "3f97612c-7a8b-4e2c-93c0-02889ef80459" (UID: "3f97612c-7a8b-4e2c-93c0-02889ef80459"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.204227 4751 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3f97612c-7a8b-4e2c-93c0-02889ef80459-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.204261 4751 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b5737b22-9f6b-4358-8078-ba61f4ecaa55-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.204273 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vdkwm\" (UniqueName: \"kubernetes.io/projected/3f97612c-7a8b-4e2c-93c0-02889ef80459-kube-api-access-vdkwm\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.204282 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-92mxv\" (UniqueName: \"kubernetes.io/projected/b5737b22-9f6b-4358-8078-ba61f4ecaa55-kube-api-access-92mxv\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.204292 4751 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b5737b22-9f6b-4358-8078-ba61f4ecaa55-plugins-conf\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.204302 4751 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f97612c-7a8b-4e2c-93c0-02889ef80459-config\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.204312 4751 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b5737b22-9f6b-4358-8078-ba61f4ecaa55-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.204321 4751 reconciler_common.go:293] "Volume detached for volume 
\"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b5737b22-9f6b-4358-8078-ba61f4ecaa55-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.204356 4751 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-39b18a24-169e-435c-baa3-bfd95e5cf125\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39b18a24-169e-435c-baa3-bfd95e5cf125\") on node \"crc\" " Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.204368 4751 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b5737b22-9f6b-4358-8078-ba61f4ecaa55-server-conf\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.204380 4751 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b5737b22-9f6b-4358-8078-ba61f4ecaa55-pod-info\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.218801 4751 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.218930 4751 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-39b18a24-169e-435c-baa3-bfd95e5cf125" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39b18a24-169e-435c-baa3-bfd95e5cf125") on node "crc" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.221035 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5737b22-9f6b-4358-8078-ba61f4ecaa55-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "b5737b22-9f6b-4358-8078-ba61f4ecaa55" (UID: "b5737b22-9f6b-4358-8078-ba61f4ecaa55"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.255366 4751 scope.go:117] "RemoveContainer" containerID="420bf1bc21a8744fa0f8d182fa935dfb129c39162a0f28aec21e75818238d60d" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.272099 4751 scope.go:117] "RemoveContainer" containerID="da0b6a80eb583e00165de033781bbf16abf64a4b73735c475613ff94b430e47f" Feb 27 17:47:55 crc kubenswrapper[4751]: E0227 17:47:55.272498 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da0b6a80eb583e00165de033781bbf16abf64a4b73735c475613ff94b430e47f\": container with ID starting with da0b6a80eb583e00165de033781bbf16abf64a4b73735c475613ff94b430e47f not found: ID does not exist" containerID="da0b6a80eb583e00165de033781bbf16abf64a4b73735c475613ff94b430e47f" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.272541 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da0b6a80eb583e00165de033781bbf16abf64a4b73735c475613ff94b430e47f"} err="failed to get container status \"da0b6a80eb583e00165de033781bbf16abf64a4b73735c475613ff94b430e47f\": rpc error: code = NotFound desc = could not find container \"da0b6a80eb583e00165de033781bbf16abf64a4b73735c475613ff94b430e47f\": container with ID starting with da0b6a80eb583e00165de033781bbf16abf64a4b73735c475613ff94b430e47f not found: ID does not exist" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.272565 4751 scope.go:117] "RemoveContainer" containerID="420bf1bc21a8744fa0f8d182fa935dfb129c39162a0f28aec21e75818238d60d" Feb 27 17:47:55 crc kubenswrapper[4751]: E0227 17:47:55.273743 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"420bf1bc21a8744fa0f8d182fa935dfb129c39162a0f28aec21e75818238d60d\": container with ID starting with 420bf1bc21a8744fa0f8d182fa935dfb129c39162a0f28aec21e75818238d60d not found: ID does not exist" containerID="420bf1bc21a8744fa0f8d182fa935dfb129c39162a0f28aec21e75818238d60d" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.273776 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"420bf1bc21a8744fa0f8d182fa935dfb129c39162a0f28aec21e75818238d60d"} err="failed to get container status \"420bf1bc21a8744fa0f8d182fa935dfb129c39162a0f28aec21e75818238d60d\": rpc error: code = NotFound desc = could not find container \"420bf1bc21a8744fa0f8d182fa935dfb129c39162a0f28aec21e75818238d60d\": container with ID starting with 420bf1bc21a8744fa0f8d182fa935dfb129c39162a0f28aec21e75818238d60d not found: ID does not exist" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.273805 4751 scope.go:117] "RemoveContainer" containerID="3d9281e9d4590e4c262ce766931e2e7851dbc3b506e81b43a42879714c1437ae" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.292232 4751 scope.go:117] "RemoveContainer" containerID="2dd735aa8fcbb1ca62075d58e199deedc900ac113d47f20ef967a641966b98ab" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.305558 4751 reconciler_common.go:293] "Volume detached for volume \"pvc-39b18a24-169e-435c-baa3-bfd95e5cf125\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39b18a24-169e-435c-baa3-bfd95e5cf125\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.305579 4751 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/b5737b22-9f6b-4358-8078-ba61f4ecaa55-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.307487 4751 scope.go:117] "RemoveContainer" containerID="3d9281e9d4590e4c262ce766931e2e7851dbc3b506e81b43a42879714c1437ae" Feb 27 17:47:55 crc kubenswrapper[4751]: E0227 17:47:55.307812 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d9281e9d4590e4c262ce766931e2e7851dbc3b506e81b43a42879714c1437ae\": container with ID starting with 3d9281e9d4590e4c262ce766931e2e7851dbc3b506e81b43a42879714c1437ae not found: ID does not exist" containerID="3d9281e9d4590e4c262ce766931e2e7851dbc3b506e81b43a42879714c1437ae" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.307838 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d9281e9d4590e4c262ce766931e2e7851dbc3b506e81b43a42879714c1437ae"} err="failed to get container status \"3d9281e9d4590e4c262ce766931e2e7851dbc3b506e81b43a42879714c1437ae\": rpc error: code = NotFound desc = could not find container \"3d9281e9d4590e4c262ce766931e2e7851dbc3b506e81b43a42879714c1437ae\": container with ID starting with 3d9281e9d4590e4c262ce766931e2e7851dbc3b506e81b43a42879714c1437ae not found: ID does not exist" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.307858 4751 scope.go:117] "RemoveContainer" containerID="2dd735aa8fcbb1ca62075d58e199deedc900ac113d47f20ef967a641966b98ab" Feb 27 17:47:55 crc kubenswrapper[4751]: E0227 17:47:55.308118 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2dd735aa8fcbb1ca62075d58e199deedc900ac113d47f20ef967a641966b98ab\": container with ID starting with 2dd735aa8fcbb1ca62075d58e199deedc900ac113d47f20ef967a641966b98ab not found: ID does not exist" containerID="2dd735aa8fcbb1ca62075d58e199deedc900ac113d47f20ef967a641966b98ab" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.308165 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2dd735aa8fcbb1ca62075d58e199deedc900ac113d47f20ef967a641966b98ab"} err="failed to get container status \"2dd735aa8fcbb1ca62075d58e199deedc900ac113d47f20ef967a641966b98ab\": rpc error: code = NotFound desc = could not find container \"2dd735aa8fcbb1ca62075d58e199deedc900ac113d47f20ef967a641966b98ab\": container with ID starting with 2dd735aa8fcbb1ca62075d58e199deedc900ac113d47f20ef967a641966b98ab not found: ID does not exist" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.444253 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.508129 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/09b01bad-88e2-4cfc-874d-28c567cba1ea-rabbitmq-confd\") pod \"09b01bad-88e2-4cfc-874d-28c567cba1ea\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.508454 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/09b01bad-88e2-4cfc-874d-28c567cba1ea-server-conf\") pod \"09b01bad-88e2-4cfc-874d-28c567cba1ea\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.508498 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4v8v8\" (UniqueName: \"kubernetes.io/projected/09b01bad-88e2-4cfc-874d-28c567cba1ea-kube-api-access-4v8v8\") pod \"09b01bad-88e2-4cfc-874d-28c567cba1ea\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.512856 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-679g7"] Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.513135 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09b01bad-88e2-4cfc-874d-28c567cba1ea-kube-api-access-4v8v8" (OuterVolumeSpecName: "kube-api-access-4v8v8") pod "09b01bad-88e2-4cfc-874d-28c567cba1ea" (UID: "09b01bad-88e2-4cfc-874d-28c567cba1ea"). InnerVolumeSpecName "kube-api-access-4v8v8". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.529984 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09b01bad-88e2-4cfc-874d-28c567cba1ea-server-conf" (OuterVolumeSpecName: "server-conf") pod "09b01bad-88e2-4cfc-874d-28c567cba1ea" (UID: "09b01bad-88e2-4cfc-874d-28c567cba1ea"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.539961 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-679g7"] Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.554984 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.567477 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.574413 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Feb 27 17:47:55 crc kubenswrapper[4751]: E0227 17:47:55.574701 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f97612c-7a8b-4e2c-93c0-02889ef80459" containerName="init" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.574718 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f97612c-7a8b-4e2c-93c0-02889ef80459" containerName="init" Feb 27 17:47:55 crc kubenswrapper[4751]: E0227 17:47:55.574737 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5737b22-9f6b-4358-8078-ba61f4ecaa55" containerName="rabbitmq" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.574743 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5737b22-9f6b-4358-8078-ba61f4ecaa55" containerName="rabbitmq" Feb 27 17:47:55 crc kubenswrapper[4751]: E0227 17:47:55.574750 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09b01bad-88e2-4cfc-874d-28c567cba1ea" containerName="rabbitmq" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.574756 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="09b01bad-88e2-4cfc-874d-28c567cba1ea" containerName="rabbitmq" Feb 27 17:47:55 crc kubenswrapper[4751]: E0227 17:47:55.574767 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09b01bad-88e2-4cfc-874d-28c567cba1ea" containerName="setup-container" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.574773 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="09b01bad-88e2-4cfc-874d-28c567cba1ea" containerName="setup-container" Feb 27 17:47:55 crc kubenswrapper[4751]: E0227 17:47:55.574783 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5737b22-9f6b-4358-8078-ba61f4ecaa55" containerName="setup-container" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.574789 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5737b22-9f6b-4358-8078-ba61f4ecaa55" containerName="setup-container" Feb 27 17:47:55 crc kubenswrapper[4751]: E0227 17:47:55.574799 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f97612c-7a8b-4e2c-93c0-02889ef80459" containerName="dnsmasq-dns" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.574820 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f97612c-7a8b-4e2c-93c0-02889ef80459" containerName="dnsmasq-dns" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.574946 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5737b22-9f6b-4358-8078-ba61f4ecaa55" containerName="rabbitmq" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.574964 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="09b01bad-88e2-4cfc-874d-28c567cba1ea" containerName="rabbitmq" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.574973 4751 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="3f97612c-7a8b-4e2c-93c0-02889ef80459" containerName="dnsmasq-dns" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.575685 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.577881 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.578058 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.578253 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-lfxm5" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.580028 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.593636 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.593769 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.619147 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/09b01bad-88e2-4cfc-874d-28c567cba1ea-rabbitmq-plugins\") pod \"09b01bad-88e2-4cfc-874d-28c567cba1ea\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.619491 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/09b01bad-88e2-4cfc-874d-28c567cba1ea-pod-info\") pod \"09b01bad-88e2-4cfc-874d-28c567cba1ea\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.619735 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09b01bad-88e2-4cfc-874d-28c567cba1ea-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "09b01bad-88e2-4cfc-874d-28c567cba1ea" (UID: "09b01bad-88e2-4cfc-874d-28c567cba1ea"). InnerVolumeSpecName "rabbitmq-plugins". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.620087 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/09b01bad-88e2-4cfc-874d-28c567cba1ea-plugins-conf\") pod \"09b01bad-88e2-4cfc-874d-28c567cba1ea\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.620120 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/09b01bad-88e2-4cfc-874d-28c567cba1ea-erlang-cookie-secret\") pod \"09b01bad-88e2-4cfc-874d-28c567cba1ea\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.620163 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/09b01bad-88e2-4cfc-874d-28c567cba1ea-rabbitmq-erlang-cookie\") pod \"09b01bad-88e2-4cfc-874d-28c567cba1ea\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.620294 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95\") pod \"09b01bad-88e2-4cfc-874d-28c567cba1ea\" (UID: \"09b01bad-88e2-4cfc-874d-28c567cba1ea\") " Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.620850 4751 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/09b01bad-88e2-4cfc-874d-28c567cba1ea-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.620875 4751 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/09b01bad-88e2-4cfc-874d-28c567cba1ea-server-conf\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.620888 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4v8v8\" (UniqueName: \"kubernetes.io/projected/09b01bad-88e2-4cfc-874d-28c567cba1ea-kube-api-access-4v8v8\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.621925 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09b01bad-88e2-4cfc-874d-28c567cba1ea-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "09b01bad-88e2-4cfc-874d-28c567cba1ea" (UID: "09b01bad-88e2-4cfc-874d-28c567cba1ea"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.621982 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09b01bad-88e2-4cfc-874d-28c567cba1ea-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "09b01bad-88e2-4cfc-874d-28c567cba1ea" (UID: "09b01bad-88e2-4cfc-874d-28c567cba1ea"). InnerVolumeSpecName "plugins-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.622566 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/09b01bad-88e2-4cfc-874d-28c567cba1ea-pod-info" (OuterVolumeSpecName: "pod-info") pod "09b01bad-88e2-4cfc-874d-28c567cba1ea" (UID: "09b01bad-88e2-4cfc-874d-28c567cba1ea"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.633691 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09b01bad-88e2-4cfc-874d-28c567cba1ea-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "09b01bad-88e2-4cfc-874d-28c567cba1ea" (UID: "09b01bad-88e2-4cfc-874d-28c567cba1ea"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.646326 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95" (OuterVolumeSpecName: "persistence") pod "09b01bad-88e2-4cfc-874d-28c567cba1ea" (UID: "09b01bad-88e2-4cfc-874d-28c567cba1ea"). InnerVolumeSpecName "pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95". PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.671274 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09b01bad-88e2-4cfc-874d-28c567cba1ea-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "09b01bad-88e2-4cfc-874d-28c567cba1ea" (UID: "09b01bad-88e2-4cfc-874d-28c567cba1ea"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.721681 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.721818 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.721838 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v75td\" (UniqueName: \"kubernetes.io/projected/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-kube-api-access-v75td\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.721865 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-server-conf\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.721883 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-39b18a24-169e-435c-baa3-bfd95e5cf125\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39b18a24-169e-435c-baa3-bfd95e5cf125\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.722184 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.722241 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.722324 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-pod-info\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.722393 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: 
\"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.722587 4751 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/09b01bad-88e2-4cfc-874d-28c567cba1ea-plugins-conf\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.722605 4751 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/09b01bad-88e2-4cfc-874d-28c567cba1ea-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.722614 4751 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/09b01bad-88e2-4cfc-874d-28c567cba1ea-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.722638 4751 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95\") on node \"crc\" " Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.722652 4751 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/09b01bad-88e2-4cfc-874d-28c567cba1ea-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.722661 4751 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/09b01bad-88e2-4cfc-874d-28c567cba1ea-pod-info\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.741210 4751 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.741520 4751 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95") on node "crc" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.824307 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-pod-info\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.824367 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.824397 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.824487 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.824504 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v75td\" (UniqueName: \"kubernetes.io/projected/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-kube-api-access-v75td\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.824530 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-server-conf\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.824549 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-39b18a24-169e-435c-baa3-bfd95e5cf125\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39b18a24-169e-435c-baa3-bfd95e5cf125\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.824570 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.824584 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-erlang-cookie-secret\") 
pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.824628 4751 reconciler_common.go:293] "Volume detached for volume \"pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95\") on node \"crc\" DevicePath \"\"" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.825301 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.826212 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.826449 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-server-conf\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.827055 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.828154 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-pod-info\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.828249 4751 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.828271 4751 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-39b18a24-169e-435c-baa3-bfd95e5cf125\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39b18a24-169e-435c-baa3-bfd95e5cf125\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/17ae5b1c4afad9afea74dcc35dc03ee0cd4dd4b37cf32e8d6a15e95fb4e7ab1f/globalmount\"" pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.830742 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.843373 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.857008 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v75td\" (UniqueName: \"kubernetes.io/projected/1784ca9a-fc20-4ed5-b770-3f1ea06b7065-kube-api-access-v75td\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:55 crc kubenswrapper[4751]: I0227 17:47:55.871093 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-39b18a24-169e-435c-baa3-bfd95e5cf125\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39b18a24-169e-435c-baa3-bfd95e5cf125\") pod \"rabbitmq-server-0\" (UID: \"1784ca9a-fc20-4ed5-b770-3f1ea06b7065\") " pod="openstack/rabbitmq-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.016735 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.189899 4751 generic.go:334] "Generic (PLEG): container finished" podID="09b01bad-88e2-4cfc-874d-28c567cba1ea" containerID="5fecee14fa06510ffec1efa4402a61cac2aa9d0b49cb625e8ec7b50d71320721" exitCode=0 Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.190911 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"09b01bad-88e2-4cfc-874d-28c567cba1ea","Type":"ContainerDied","Data":"5fecee14fa06510ffec1efa4402a61cac2aa9d0b49cb625e8ec7b50d71320721"} Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.192291 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"09b01bad-88e2-4cfc-874d-28c567cba1ea","Type":"ContainerDied","Data":"b7ec5995e40e1b3ad228afba459965412f6fbea3c1398891b0dfc482933e6386"} Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.192367 4751 scope.go:117] "RemoveContainer" containerID="5fecee14fa06510ffec1efa4402a61cac2aa9d0b49cb625e8ec7b50d71320721" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.193625 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.227609 4751 scope.go:117] "RemoveContainer" containerID="dc475cbc1b9eb52496b1d4d1dc4c837f0fba67a9db6b1f01c8822f42f9f756c3" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.255574 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.268552 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.280856 4751 scope.go:117] "RemoveContainer" containerID="5fecee14fa06510ffec1efa4402a61cac2aa9d0b49cb625e8ec7b50d71320721" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.282949 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 27 17:47:56 crc kubenswrapper[4751]: E0227 17:47:56.285399 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5fecee14fa06510ffec1efa4402a61cac2aa9d0b49cb625e8ec7b50d71320721\": container with ID starting with 5fecee14fa06510ffec1efa4402a61cac2aa9d0b49cb625e8ec7b50d71320721 not found: ID does not exist" containerID="5fecee14fa06510ffec1efa4402a61cac2aa9d0b49cb625e8ec7b50d71320721" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.285483 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fecee14fa06510ffec1efa4402a61cac2aa9d0b49cb625e8ec7b50d71320721"} err="failed to get container status \"5fecee14fa06510ffec1efa4402a61cac2aa9d0b49cb625e8ec7b50d71320721\": rpc error: code = NotFound desc = could not find container \"5fecee14fa06510ffec1efa4402a61cac2aa9d0b49cb625e8ec7b50d71320721\": container with ID starting with 5fecee14fa06510ffec1efa4402a61cac2aa9d0b49cb625e8ec7b50d71320721 not found: ID does not exist" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.285511 4751 scope.go:117] "RemoveContainer" containerID="dc475cbc1b9eb52496b1d4d1dc4c837f0fba67a9db6b1f01c8822f42f9f756c3" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.285883 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: E0227 17:47:56.287318 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc475cbc1b9eb52496b1d4d1dc4c837f0fba67a9db6b1f01c8822f42f9f756c3\": container with ID starting with dc475cbc1b9eb52496b1d4d1dc4c837f0fba67a9db6b1f01c8822f42f9f756c3 not found: ID does not exist" containerID="dc475cbc1b9eb52496b1d4d1dc4c837f0fba67a9db6b1f01c8822f42f9f756c3" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.287346 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc475cbc1b9eb52496b1d4d1dc4c837f0fba67a9db6b1f01c8822f42f9f756c3"} err="failed to get container status \"dc475cbc1b9eb52496b1d4d1dc4c837f0fba67a9db6b1f01c8822f42f9f756c3\": rpc error: code = NotFound desc = could not find container \"dc475cbc1b9eb52496b1d4d1dc4c837f0fba67a9db6b1f01c8822f42f9f756c3\": container with ID starting with dc475cbc1b9eb52496b1d4d1dc4c837f0fba67a9db6b1f01c8822f42f9f756c3 not found: ID does not exist" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.290303 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.291051 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.292348 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.292889 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-b5zj2" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.293127 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.302798 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.442124 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.442194 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.442243 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.442283 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.442352 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.442484 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vqqb8\" (UniqueName: \"kubernetes.io/projected/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-kube-api-access-vqqb8\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.442598 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.442710 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.442753 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.535341 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09b01bad-88e2-4cfc-874d-28c567cba1ea" path="/var/lib/kubelet/pods/09b01bad-88e2-4cfc-874d-28c567cba1ea/volumes" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.536766 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f97612c-7a8b-4e2c-93c0-02889ef80459" path="/var/lib/kubelet/pods/3f97612c-7a8b-4e2c-93c0-02889ef80459/volumes" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.537807 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5737b22-9f6b-4358-8078-ba61f4ecaa55" path="/var/lib/kubelet/pods/b5737b22-9f6b-4358-8078-ba61f4ecaa55/volumes" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.544274 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.544349 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.544394 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.544481 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.544539 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.544596 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vqqb8\" (UniqueName: \"kubernetes.io/projected/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-kube-api-access-vqqb8\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.544667 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.544751 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.544784 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.545028 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.545381 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.545707 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.545872 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.550044 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.550253 4751 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.550306 4751 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/fa695d6d534e31bdc767ef4b2993b9c7e55a3b9645abe15cec55d09e67c1246e/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.551918 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.553033 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.568876 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vqqb8\" (UniqueName: \"kubernetes.io/projected/72e8aad9-a325-4eb4-87fa-6b326ceb9a26-kube-api-access-vqqb8\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.571924 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.588716 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f8c90ebf-cb47-43a2-97c0-3b2ca414ee95\") pod \"rabbitmq-cell1-server-0\" (UID: \"72e8aad9-a325-4eb4-87fa-6b326ceb9a26\") " pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.618829 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:47:56 crc kubenswrapper[4751]: I0227 17:47:56.900904 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 27 17:47:57 crc kubenswrapper[4751]: I0227 17:47:57.206383 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"72e8aad9-a325-4eb4-87fa-6b326ceb9a26","Type":"ContainerStarted","Data":"734a1327645127d4947478417877891a1ddc55c1a63845093031e57fea12cab4"} Feb 27 17:47:57 crc kubenswrapper[4751]: I0227 17:47:57.207721 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"1784ca9a-fc20-4ed5-b770-3f1ea06b7065","Type":"ContainerStarted","Data":"ab00c5abf013fb136b6073bcc6c9b54d56d4b37d7842c405da3e540b55bc2caf"} Feb 27 17:47:58 crc kubenswrapper[4751]: I0227 17:47:58.223549 4751 generic.go:334] "Generic (PLEG): container finished" podID="028097b5-dd4b-4860-bbe4-aa3cb5a79c2c" containerID="42ec1fd65c2fef96caa0b072b89e7368f4064966e26df4060a104aae107f403e" exitCode=0 Feb 27 17:47:58 crc kubenswrapper[4751]: I0227 17:47:58.223654 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mmdln" event={"ID":"028097b5-dd4b-4860-bbe4-aa3cb5a79c2c","Type":"ContainerDied","Data":"42ec1fd65c2fef96caa0b072b89e7368f4064966e26df4060a104aae107f403e"} Feb 27 17:47:58 crc kubenswrapper[4751]: I0227 17:47:58.226282 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"1784ca9a-fc20-4ed5-b770-3f1ea06b7065","Type":"ContainerStarted","Data":"945fb9d94b8d5808fb58ed3422cd1a059d8069689673393afe8873de593d8677"} Feb 27 17:47:58 crc kubenswrapper[4751]: I0227 17:47:58.229929 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"72e8aad9-a325-4eb4-87fa-6b326ceb9a26","Type":"ContainerStarted","Data":"dbaf6afef66c748ea84fa542901ecce56b304845cdebdf36e1dd7799cce8119c"} Feb 27 17:47:59 crc kubenswrapper[4751]: I0227 17:47:59.248298 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mmdln" event={"ID":"028097b5-dd4b-4860-bbe4-aa3cb5a79c2c","Type":"ContainerStarted","Data":"4fe27dbf8a4cb491ec551347e9f8f1e612699081f97c7407b65b02a11733a023"} Feb 27 17:47:59 crc kubenswrapper[4751]: I0227 17:47:59.274799 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mmdln" podStartSLOduration=2.43466946 podStartE2EDuration="1m27.274775678s" podCreationTimestamp="2026-02-27 17:46:32 +0000 UTC" firstStartedPulling="2026-02-27 17:46:33.807271953 +0000 UTC m=+4955.954286410" lastFinishedPulling="2026-02-27 17:47:58.647378181 +0000 UTC m=+5040.794392628" observedRunningTime="2026-02-27 17:47:59.268444312 +0000 UTC m=+5041.415458809" watchObservedRunningTime="2026-02-27 17:47:59.274775678 +0000 UTC m=+5041.421790165" Feb 27 17:48:00 crc kubenswrapper[4751]: I0227 17:48:00.152502 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536908-8tlz6"] Feb 
27 17:48:00 crc kubenswrapper[4751]: I0227 17:48:00.154524 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536908-8tlz6" Feb 27 17:48:00 crc kubenswrapper[4751]: I0227 17:48:00.166980 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536908-8tlz6"] Feb 27 17:48:00 crc kubenswrapper[4751]: I0227 17:48:00.315051 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bzpf\" (UniqueName: \"kubernetes.io/projected/d75ed18f-9cc7-4c8f-9856-130e8b5932f8-kube-api-access-5bzpf\") pod \"auto-csr-approver-29536908-8tlz6\" (UID: \"d75ed18f-9cc7-4c8f-9856-130e8b5932f8\") " pod="openshift-infra/auto-csr-approver-29536908-8tlz6" Feb 27 17:48:00 crc kubenswrapper[4751]: I0227 17:48:00.417063 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bzpf\" (UniqueName: \"kubernetes.io/projected/d75ed18f-9cc7-4c8f-9856-130e8b5932f8-kube-api-access-5bzpf\") pod \"auto-csr-approver-29536908-8tlz6\" (UID: \"d75ed18f-9cc7-4c8f-9856-130e8b5932f8\") " pod="openshift-infra/auto-csr-approver-29536908-8tlz6" Feb 27 17:48:00 crc kubenswrapper[4751]: I0227 17:48:00.442119 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5bzpf\" (UniqueName: \"kubernetes.io/projected/d75ed18f-9cc7-4c8f-9856-130e8b5932f8-kube-api-access-5bzpf\") pod \"auto-csr-approver-29536908-8tlz6\" (UID: \"d75ed18f-9cc7-4c8f-9856-130e8b5932f8\") " pod="openshift-infra/auto-csr-approver-29536908-8tlz6" Feb 27 17:48:00 crc kubenswrapper[4751]: I0227 17:48:00.478298 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536908-8tlz6" Feb 27 17:48:00 crc kubenswrapper[4751]: I0227 17:48:00.876191 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536908-8tlz6"] Feb 27 17:48:00 crc kubenswrapper[4751]: W0227 17:48:00.879961 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd75ed18f_9cc7_4c8f_9856_130e8b5932f8.slice/crio-732936145bf337d13f904cfd06c3a07178ad40eafe4e5749b38c9ec797c0b989 WatchSource:0}: Error finding container 732936145bf337d13f904cfd06c3a07178ad40eafe4e5749b38c9ec797c0b989: Status 404 returned error can't find the container with id 732936145bf337d13f904cfd06c3a07178ad40eafe4e5749b38c9ec797c0b989 Feb 27 17:48:01 crc kubenswrapper[4751]: I0227 17:48:01.266666 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536908-8tlz6" event={"ID":"d75ed18f-9cc7-4c8f-9856-130e8b5932f8","Type":"ContainerStarted","Data":"732936145bf337d13f904cfd06c3a07178ad40eafe4e5749b38c9ec797c0b989"} Feb 27 17:48:01 crc kubenswrapper[4751]: E0227 17:48:01.786621 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)" image="registry.redhat.io/openshift4/ose-cli:latest" Feb 27 17:48:01 crc kubenswrapper[4751]: E0227 17:48:01.786816 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 17:48:01 crc kubenswrapper[4751]: container 
&Container{Name:oc,Image:registry.redhat.io/openshift4/ose-cli:latest,Command:[/bin/bash -c oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve Feb 27 17:48:01 crc kubenswrapper[4751]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5bzpf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod auto-csr-approver-29536908-8tlz6_openshift-infra(d75ed18f-9cc7-4c8f-9856-130e8b5932f8): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error) Feb 27 17:48:01 crc kubenswrapper[4751]: > logger="UnhandledError" Feb 27 17:48:01 crc kubenswrapper[4751]: E0227 17:48:01.788075 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)\"" pod="openshift-infra/auto-csr-approver-29536908-8tlz6" podUID="d75ed18f-9cc7-4c8f-9856-130e8b5932f8" Feb 27 17:48:02 crc kubenswrapper[4751]: E0227 17:48:02.279556 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536908-8tlz6" podUID="d75ed18f-9cc7-4c8f-9856-130e8b5932f8" Feb 27 17:48:02 crc kubenswrapper[4751]: I0227 17:48:02.603967 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mmdln" Feb 27 17:48:02 crc kubenswrapper[4751]: I0227 17:48:02.604049 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mmdln" Feb 27 17:48:02 crc kubenswrapper[4751]: I0227 17:48:02.669173 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mmdln" Feb 27 17:48:03 crc kubenswrapper[4751]: I0227 17:48:03.369806 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mmdln" Feb 27 17:48:03 crc kubenswrapper[4751]: I0227 17:48:03.512816 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mmdln"] Feb 27 17:48:05 crc kubenswrapper[4751]: I0227 17:48:05.305711 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mmdln" podUID="028097b5-dd4b-4860-bbe4-aa3cb5a79c2c" 
containerName="registry-server" containerID="cri-o://4fe27dbf8a4cb491ec551347e9f8f1e612699081f97c7407b65b02a11733a023" gracePeriod=2 Feb 27 17:48:05 crc kubenswrapper[4751]: E0227 17:48:05.522823 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:48:05 crc kubenswrapper[4751]: I0227 17:48:05.775587 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mmdln" Feb 27 17:48:05 crc kubenswrapper[4751]: I0227 17:48:05.922532 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kczvp\" (UniqueName: \"kubernetes.io/projected/028097b5-dd4b-4860-bbe4-aa3cb5a79c2c-kube-api-access-kczvp\") pod \"028097b5-dd4b-4860-bbe4-aa3cb5a79c2c\" (UID: \"028097b5-dd4b-4860-bbe4-aa3cb5a79c2c\") " Feb 27 17:48:05 crc kubenswrapper[4751]: I0227 17:48:05.922670 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/028097b5-dd4b-4860-bbe4-aa3cb5a79c2c-utilities\") pod \"028097b5-dd4b-4860-bbe4-aa3cb5a79c2c\" (UID: \"028097b5-dd4b-4860-bbe4-aa3cb5a79c2c\") " Feb 27 17:48:05 crc kubenswrapper[4751]: I0227 17:48:05.922917 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/028097b5-dd4b-4860-bbe4-aa3cb5a79c2c-catalog-content\") pod \"028097b5-dd4b-4860-bbe4-aa3cb5a79c2c\" (UID: \"028097b5-dd4b-4860-bbe4-aa3cb5a79c2c\") " Feb 27 17:48:05 crc kubenswrapper[4751]: I0227 17:48:05.924395 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/028097b5-dd4b-4860-bbe4-aa3cb5a79c2c-utilities" (OuterVolumeSpecName: "utilities") pod "028097b5-dd4b-4860-bbe4-aa3cb5a79c2c" (UID: "028097b5-dd4b-4860-bbe4-aa3cb5a79c2c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:48:05 crc kubenswrapper[4751]: I0227 17:48:05.930022 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/028097b5-dd4b-4860-bbe4-aa3cb5a79c2c-kube-api-access-kczvp" (OuterVolumeSpecName: "kube-api-access-kczvp") pod "028097b5-dd4b-4860-bbe4-aa3cb5a79c2c" (UID: "028097b5-dd4b-4860-bbe4-aa3cb5a79c2c"). InnerVolumeSpecName "kube-api-access-kczvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:48:05 crc kubenswrapper[4751]: I0227 17:48:05.958209 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/028097b5-dd4b-4860-bbe4-aa3cb5a79c2c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "028097b5-dd4b-4860-bbe4-aa3cb5a79c2c" (UID: "028097b5-dd4b-4860-bbe4-aa3cb5a79c2c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:48:06 crc kubenswrapper[4751]: I0227 17:48:06.025247 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/028097b5-dd4b-4860-bbe4-aa3cb5a79c2c-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 17:48:06 crc kubenswrapper[4751]: I0227 17:48:06.025305 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kczvp\" (UniqueName: \"kubernetes.io/projected/028097b5-dd4b-4860-bbe4-aa3cb5a79c2c-kube-api-access-kczvp\") on node \"crc\" DevicePath \"\"" Feb 27 17:48:06 crc kubenswrapper[4751]: I0227 17:48:06.025326 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/028097b5-dd4b-4860-bbe4-aa3cb5a79c2c-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 17:48:06 crc kubenswrapper[4751]: I0227 17:48:06.317842 4751 generic.go:334] "Generic (PLEG): container finished" podID="028097b5-dd4b-4860-bbe4-aa3cb5a79c2c" containerID="4fe27dbf8a4cb491ec551347e9f8f1e612699081f97c7407b65b02a11733a023" exitCode=0 Feb 27 17:48:06 crc kubenswrapper[4751]: I0227 17:48:06.317928 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mmdln" event={"ID":"028097b5-dd4b-4860-bbe4-aa3cb5a79c2c","Type":"ContainerDied","Data":"4fe27dbf8a4cb491ec551347e9f8f1e612699081f97c7407b65b02a11733a023"} Feb 27 17:48:06 crc kubenswrapper[4751]: I0227 17:48:06.318013 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mmdln" event={"ID":"028097b5-dd4b-4860-bbe4-aa3cb5a79c2c","Type":"ContainerDied","Data":"7f2d30b0f903bc64472467d838e58af8131a51631880e4565847a8c44d3a5c87"} Feb 27 17:48:06 crc kubenswrapper[4751]: I0227 17:48:06.318048 4751 scope.go:117] "RemoveContainer" containerID="4fe27dbf8a4cb491ec551347e9f8f1e612699081f97c7407b65b02a11733a023" Feb 27 17:48:06 crc kubenswrapper[4751]: I0227 17:48:06.317935 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mmdln" Feb 27 17:48:06 crc kubenswrapper[4751]: I0227 17:48:06.350649 4751 scope.go:117] "RemoveContainer" containerID="42ec1fd65c2fef96caa0b072b89e7368f4064966e26df4060a104aae107f403e" Feb 27 17:48:06 crc kubenswrapper[4751]: I0227 17:48:06.382635 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mmdln"] Feb 27 17:48:06 crc kubenswrapper[4751]: I0227 17:48:06.397069 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mmdln"] Feb 27 17:48:06 crc kubenswrapper[4751]: I0227 17:48:06.405395 4751 scope.go:117] "RemoveContainer" containerID="4fcb4e69b86c9fb78ba2cae4711ab52b0afbccfd42f13434c8b9d1cc41f8ad2c" Feb 27 17:48:06 crc kubenswrapper[4751]: I0227 17:48:06.429168 4751 scope.go:117] "RemoveContainer" containerID="4fe27dbf8a4cb491ec551347e9f8f1e612699081f97c7407b65b02a11733a023" Feb 27 17:48:06 crc kubenswrapper[4751]: E0227 17:48:06.429770 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4fe27dbf8a4cb491ec551347e9f8f1e612699081f97c7407b65b02a11733a023\": container with ID starting with 4fe27dbf8a4cb491ec551347e9f8f1e612699081f97c7407b65b02a11733a023 not found: ID does not exist" containerID="4fe27dbf8a4cb491ec551347e9f8f1e612699081f97c7407b65b02a11733a023" Feb 27 17:48:06 crc kubenswrapper[4751]: I0227 17:48:06.429803 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4fe27dbf8a4cb491ec551347e9f8f1e612699081f97c7407b65b02a11733a023"} err="failed to get container status \"4fe27dbf8a4cb491ec551347e9f8f1e612699081f97c7407b65b02a11733a023\": rpc error: code = NotFound desc = could not find container \"4fe27dbf8a4cb491ec551347e9f8f1e612699081f97c7407b65b02a11733a023\": container with ID starting with 4fe27dbf8a4cb491ec551347e9f8f1e612699081f97c7407b65b02a11733a023 not found: ID does not exist" Feb 27 17:48:06 crc kubenswrapper[4751]: I0227 17:48:06.429823 4751 scope.go:117] "RemoveContainer" containerID="42ec1fd65c2fef96caa0b072b89e7368f4064966e26df4060a104aae107f403e" Feb 27 17:48:06 crc kubenswrapper[4751]: E0227 17:48:06.430182 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"42ec1fd65c2fef96caa0b072b89e7368f4064966e26df4060a104aae107f403e\": container with ID starting with 42ec1fd65c2fef96caa0b072b89e7368f4064966e26df4060a104aae107f403e not found: ID does not exist" containerID="42ec1fd65c2fef96caa0b072b89e7368f4064966e26df4060a104aae107f403e" Feb 27 17:48:06 crc kubenswrapper[4751]: I0227 17:48:06.430208 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"42ec1fd65c2fef96caa0b072b89e7368f4064966e26df4060a104aae107f403e"} err="failed to get container status \"42ec1fd65c2fef96caa0b072b89e7368f4064966e26df4060a104aae107f403e\": rpc error: code = NotFound desc = could not find container \"42ec1fd65c2fef96caa0b072b89e7368f4064966e26df4060a104aae107f403e\": container with ID starting with 42ec1fd65c2fef96caa0b072b89e7368f4064966e26df4060a104aae107f403e not found: ID does not exist" Feb 27 17:48:06 crc kubenswrapper[4751]: I0227 17:48:06.430222 4751 scope.go:117] "RemoveContainer" containerID="4fcb4e69b86c9fb78ba2cae4711ab52b0afbccfd42f13434c8b9d1cc41f8ad2c" Feb 27 17:48:06 crc kubenswrapper[4751]: E0227 17:48:06.430629 4751 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"4fcb4e69b86c9fb78ba2cae4711ab52b0afbccfd42f13434c8b9d1cc41f8ad2c\": container with ID starting with 4fcb4e69b86c9fb78ba2cae4711ab52b0afbccfd42f13434c8b9d1cc41f8ad2c not found: ID does not exist" containerID="4fcb4e69b86c9fb78ba2cae4711ab52b0afbccfd42f13434c8b9d1cc41f8ad2c" Feb 27 17:48:06 crc kubenswrapper[4751]: I0227 17:48:06.430660 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4fcb4e69b86c9fb78ba2cae4711ab52b0afbccfd42f13434c8b9d1cc41f8ad2c"} err="failed to get container status \"4fcb4e69b86c9fb78ba2cae4711ab52b0afbccfd42f13434c8b9d1cc41f8ad2c\": rpc error: code = NotFound desc = could not find container \"4fcb4e69b86c9fb78ba2cae4711ab52b0afbccfd42f13434c8b9d1cc41f8ad2c\": container with ID starting with 4fcb4e69b86c9fb78ba2cae4711ab52b0afbccfd42f13434c8b9d1cc41f8ad2c not found: ID does not exist" Feb 27 17:48:06 crc kubenswrapper[4751]: I0227 17:48:06.530262 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="028097b5-dd4b-4860-bbe4-aa3cb5a79c2c" path="/var/lib/kubelet/pods/028097b5-dd4b-4860-bbe4-aa3cb5a79c2c/volumes" Feb 27 17:48:15 crc kubenswrapper[4751]: E0227 17:48:15.585092 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)" image="registry.redhat.io/openshift4/ose-cli:latest" Feb 27 17:48:15 crc kubenswrapper[4751]: E0227 17:48:15.586001 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 17:48:15 crc kubenswrapper[4751]: container &Container{Name:oc,Image:registry.redhat.io/openshift4/ose-cli:latest,Command:[/bin/bash -c oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve Feb 27 17:48:15 crc kubenswrapper[4751]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5bzpf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod auto-csr-approver-29536908-8tlz6_openshift-infra(d75ed18f-9cc7-4c8f-9856-130e8b5932f8): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error) Feb 27 17:48:15 crc kubenswrapper[4751]: > logger="UnhandledError" Feb 27 17:48:15 crc kubenswrapper[4751]: E0227 17:48:15.587235 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading 
signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)\"" pod="openshift-infra/auto-csr-approver-29536908-8tlz6" podUID="d75ed18f-9cc7-4c8f-9856-130e8b5932f8" Feb 27 17:48:20 crc kubenswrapper[4751]: E0227 17:48:20.528012 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:48:30 crc kubenswrapper[4751]: E0227 17:48:30.524547 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536908-8tlz6" podUID="d75ed18f-9cc7-4c8f-9856-130e8b5932f8" Feb 27 17:48:30 crc kubenswrapper[4751]: I0227 17:48:30.590172 4751 generic.go:334] "Generic (PLEG): container finished" podID="72e8aad9-a325-4eb4-87fa-6b326ceb9a26" containerID="dbaf6afef66c748ea84fa542901ecce56b304845cdebdf36e1dd7799cce8119c" exitCode=0 Feb 27 17:48:30 crc kubenswrapper[4751]: I0227 17:48:30.590240 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"72e8aad9-a325-4eb4-87fa-6b326ceb9a26","Type":"ContainerDied","Data":"dbaf6afef66c748ea84fa542901ecce56b304845cdebdf36e1dd7799cce8119c"} Feb 27 17:48:31 crc kubenswrapper[4751]: I0227 17:48:31.601620 4751 generic.go:334] "Generic (PLEG): container finished" podID="1784ca9a-fc20-4ed5-b770-3f1ea06b7065" containerID="945fb9d94b8d5808fb58ed3422cd1a059d8069689673393afe8873de593d8677" exitCode=0 Feb 27 17:48:31 crc kubenswrapper[4751]: I0227 17:48:31.601704 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"1784ca9a-fc20-4ed5-b770-3f1ea06b7065","Type":"ContainerDied","Data":"945fb9d94b8d5808fb58ed3422cd1a059d8069689673393afe8873de593d8677"} Feb 27 17:48:31 crc kubenswrapper[4751]: I0227 17:48:31.604543 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"72e8aad9-a325-4eb4-87fa-6b326ceb9a26","Type":"ContainerStarted","Data":"167a1c9f1ece6291f0f2904ef867126f43eadb5d9b16485fe649dc0c3b078ccc"} Feb 27 17:48:31 crc kubenswrapper[4751]: I0227 17:48:31.604836 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:48:31 crc kubenswrapper[4751]: I0227 17:48:31.668946 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=35.668917733 podStartE2EDuration="35.668917733s" podCreationTimestamp="2026-02-27 17:47:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 17:48:31.660969955 +0000 UTC m=+5073.807984402" watchObservedRunningTime="2026-02-27 17:48:31.668917733 +0000 UTC m=+5073.815932220" Feb 27 17:48:32 crc kubenswrapper[4751]: I0227 17:48:32.614590 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"1784ca9a-fc20-4ed5-b770-3f1ea06b7065","Type":"ContainerStarted","Data":"7d15ac6012b07258578bf3590a858cb506647707741208867e02b01de44ef77a"} Feb 27 17:48:32 crc 
kubenswrapper[4751]: I0227 17:48:32.614944 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Feb 27 17:48:32 crc kubenswrapper[4751]: I0227 17:48:32.641509 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.641480771 podStartE2EDuration="37.641480771s" podCreationTimestamp="2026-02-27 17:47:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 17:48:32.639323444 +0000 UTC m=+5074.786337901" watchObservedRunningTime="2026-02-27 17:48:32.641480771 +0000 UTC m=+5074.788495258" Feb 27 17:48:33 crc kubenswrapper[4751]: E0227 17:48:33.524047 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:48:43 crc kubenswrapper[4751]: E0227 17:48:43.399274 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)" image="registry.redhat.io/openshift4/ose-cli:latest" Feb 27 17:48:43 crc kubenswrapper[4751]: E0227 17:48:43.399856 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 17:48:43 crc kubenswrapper[4751]: container &Container{Name:oc,Image:registry.redhat.io/openshift4/ose-cli:latest,Command:[/bin/bash -c oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve Feb 27 17:48:43 crc kubenswrapper[4751]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5bzpf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod auto-csr-approver-29536908-8tlz6_openshift-infra(d75ed18f-9cc7-4c8f-9856-130e8b5932f8): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error) Feb 27 17:48:43 crc kubenswrapper[4751]: > logger="UnhandledError" Feb 27 17:48:43 crc kubenswrapper[4751]: E0227 17:48:43.401026 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from 
https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)\"" pod="openshift-infra/auto-csr-approver-29536908-8tlz6" podUID="d75ed18f-9cc7-4c8f-9856-130e8b5932f8" Feb 27 17:48:46 crc kubenswrapper[4751]: I0227 17:48:46.019535 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Feb 27 17:48:46 crc kubenswrapper[4751]: I0227 17:48:46.624875 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Feb 27 17:48:47 crc kubenswrapper[4751]: E0227 17:48:47.523537 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:48:53 crc kubenswrapper[4751]: I0227 17:48:53.697789 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"] Feb 27 17:48:53 crc kubenswrapper[4751]: E0227 17:48:53.698655 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="028097b5-dd4b-4860-bbe4-aa3cb5a79c2c" containerName="registry-server" Feb 27 17:48:53 crc kubenswrapper[4751]: I0227 17:48:53.698682 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="028097b5-dd4b-4860-bbe4-aa3cb5a79c2c" containerName="registry-server" Feb 27 17:48:53 crc kubenswrapper[4751]: E0227 17:48:53.698698 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="028097b5-dd4b-4860-bbe4-aa3cb5a79c2c" containerName="extract-content" Feb 27 17:48:53 crc kubenswrapper[4751]: I0227 17:48:53.698705 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="028097b5-dd4b-4860-bbe4-aa3cb5a79c2c" containerName="extract-content" Feb 27 17:48:53 crc kubenswrapper[4751]: E0227 17:48:53.698714 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="028097b5-dd4b-4860-bbe4-aa3cb5a79c2c" containerName="extract-utilities" Feb 27 17:48:53 crc kubenswrapper[4751]: I0227 17:48:53.698720 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="028097b5-dd4b-4860-bbe4-aa3cb5a79c2c" containerName="extract-utilities" Feb 27 17:48:53 crc kubenswrapper[4751]: I0227 17:48:53.698910 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="028097b5-dd4b-4860-bbe4-aa3cb5a79c2c" containerName="registry-server" Feb 27 17:48:53 crc kubenswrapper[4751]: I0227 17:48:53.699500 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Feb 27 17:48:53 crc kubenswrapper[4751]: I0227 17:48:53.705891 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-g2c9q" Feb 27 17:48:53 crc kubenswrapper[4751]: I0227 17:48:53.719336 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Feb 27 17:48:53 crc kubenswrapper[4751]: I0227 17:48:53.841774 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rk8rf\" (UniqueName: \"kubernetes.io/projected/7ac73047-712f-49c0-87ee-c27d787c7b58-kube-api-access-rk8rf\") pod \"mariadb-client\" (UID: \"7ac73047-712f-49c0-87ee-c27d787c7b58\") " pod="openstack/mariadb-client" Feb 27 17:48:53 crc kubenswrapper[4751]: I0227 17:48:53.944119 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rk8rf\" (UniqueName: \"kubernetes.io/projected/7ac73047-712f-49c0-87ee-c27d787c7b58-kube-api-access-rk8rf\") pod \"mariadb-client\" (UID: \"7ac73047-712f-49c0-87ee-c27d787c7b58\") " pod="openstack/mariadb-client" Feb 27 17:48:53 crc kubenswrapper[4751]: I0227 17:48:53.977791 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rk8rf\" (UniqueName: \"kubernetes.io/projected/7ac73047-712f-49c0-87ee-c27d787c7b58-kube-api-access-rk8rf\") pod \"mariadb-client\" (UID: \"7ac73047-712f-49c0-87ee-c27d787c7b58\") " pod="openstack/mariadb-client" Feb 27 17:48:54 crc kubenswrapper[4751]: I0227 17:48:54.019513 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Feb 27 17:48:54 crc kubenswrapper[4751]: I0227 17:48:54.456431 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Feb 27 17:48:54 crc kubenswrapper[4751]: I0227 17:48:54.830323 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"7ac73047-712f-49c0-87ee-c27d787c7b58","Type":"ContainerStarted","Data":"8f1cc5d095b1e53234cd149e63fc8caf74fdb0464ff2bd0562acf9d4880cc52e"} Feb 27 17:48:54 crc kubenswrapper[4751]: I0227 17:48:54.830739 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"7ac73047-712f-49c0-87ee-c27d787c7b58","Type":"ContainerStarted","Data":"4c7e5ba169ad69cfffab81af2ac214a06b5bc494cb0f6507ab0c83ffadec9106"} Feb 27 17:48:54 crc kubenswrapper[4751]: I0227 17:48:54.853125 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-client" podStartSLOduration=1.853104499 podStartE2EDuration="1.853104499s" podCreationTimestamp="2026-02-27 17:48:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 17:48:54.847605545 +0000 UTC m=+5096.994620032" watchObservedRunningTime="2026-02-27 17:48:54.853104499 +0000 UTC m=+5097.000118976" Feb 27 17:48:55 crc kubenswrapper[4751]: E0227 17:48:55.522916 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536908-8tlz6" podUID="d75ed18f-9cc7-4c8f-9856-130e8b5932f8" Feb 27 17:48:58 crc kubenswrapper[4751]: I0227 17:48:58.918509 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:48:58 crc kubenswrapper[4751]: I0227 17:48:58.919148 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:49:02 crc kubenswrapper[4751]: E0227 17:49:02.524454 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:49:08 crc kubenswrapper[4751]: E0227 17:49:08.533009 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536908-8tlz6" podUID="d75ed18f-9cc7-4c8f-9856-130e8b5932f8" Feb 27 17:49:11 crc kubenswrapper[4751]: I0227 17:49:11.406685 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Feb 27 17:49:11 crc kubenswrapper[4751]: I0227 17:49:11.407223 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mariadb-client" podUID="7ac73047-712f-49c0-87ee-c27d787c7b58" containerName="mariadb-client" containerID="cri-o://8f1cc5d095b1e53234cd149e63fc8caf74fdb0464ff2bd0562acf9d4880cc52e" gracePeriod=30 Feb 27 17:49:12 crc kubenswrapper[4751]: I0227 17:49:12.013319 4751 generic.go:334] "Generic (PLEG): container finished" podID="7ac73047-712f-49c0-87ee-c27d787c7b58" containerID="8f1cc5d095b1e53234cd149e63fc8caf74fdb0464ff2bd0562acf9d4880cc52e" exitCode=143 Feb 27 17:49:12 crc kubenswrapper[4751]: I0227 17:49:12.013388 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"7ac73047-712f-49c0-87ee-c27d787c7b58","Type":"ContainerDied","Data":"8f1cc5d095b1e53234cd149e63fc8caf74fdb0464ff2bd0562acf9d4880cc52e"} Feb 27 17:49:12 crc kubenswrapper[4751]: I0227 17:49:12.013474 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"7ac73047-712f-49c0-87ee-c27d787c7b58","Type":"ContainerDied","Data":"4c7e5ba169ad69cfffab81af2ac214a06b5bc494cb0f6507ab0c83ffadec9106"} Feb 27 17:49:12 crc kubenswrapper[4751]: I0227 17:49:12.013494 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4c7e5ba169ad69cfffab81af2ac214a06b5bc494cb0f6507ab0c83ffadec9106" Feb 27 17:49:12 crc kubenswrapper[4751]: I0227 17:49:12.036072 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Feb 27 17:49:12 crc kubenswrapper[4751]: I0227 17:49:12.158244 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rk8rf\" (UniqueName: \"kubernetes.io/projected/7ac73047-712f-49c0-87ee-c27d787c7b58-kube-api-access-rk8rf\") pod \"7ac73047-712f-49c0-87ee-c27d787c7b58\" (UID: \"7ac73047-712f-49c0-87ee-c27d787c7b58\") " Feb 27 17:49:12 crc kubenswrapper[4751]: I0227 17:49:12.168929 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ac73047-712f-49c0-87ee-c27d787c7b58-kube-api-access-rk8rf" (OuterVolumeSpecName: "kube-api-access-rk8rf") pod "7ac73047-712f-49c0-87ee-c27d787c7b58" (UID: "7ac73047-712f-49c0-87ee-c27d787c7b58"). InnerVolumeSpecName "kube-api-access-rk8rf". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:49:12 crc kubenswrapper[4751]: I0227 17:49:12.261237 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rk8rf\" (UniqueName: \"kubernetes.io/projected/7ac73047-712f-49c0-87ee-c27d787c7b58-kube-api-access-rk8rf\") on node \"crc\" DevicePath \"\"" Feb 27 17:49:13 crc kubenswrapper[4751]: I0227 17:49:13.023276 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Feb 27 17:49:13 crc kubenswrapper[4751]: I0227 17:49:13.064864 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Feb 27 17:49:13 crc kubenswrapper[4751]: I0227 17:49:13.075344 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"] Feb 27 17:49:13 crc kubenswrapper[4751]: E0227 17:49:13.523242 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:49:14 crc kubenswrapper[4751]: I0227 17:49:14.546189 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ac73047-712f-49c0-87ee-c27d787c7b58" path="/var/lib/kubelet/pods/7ac73047-712f-49c0-87ee-c27d787c7b58/volumes" Feb 27 17:49:15 crc kubenswrapper[4751]: I0227 17:49:15.932820 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-cnf5p"] Feb 27 17:49:15 crc kubenswrapper[4751]: E0227 17:49:15.933337 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ac73047-712f-49c0-87ee-c27d787c7b58" containerName="mariadb-client" Feb 27 17:49:15 crc kubenswrapper[4751]: I0227 17:49:15.933358 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ac73047-712f-49c0-87ee-c27d787c7b58" containerName="mariadb-client" Feb 27 17:49:15 crc kubenswrapper[4751]: I0227 17:49:15.933645 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ac73047-712f-49c0-87ee-c27d787c7b58" containerName="mariadb-client" Feb 27 17:49:15 crc kubenswrapper[4751]: I0227 17:49:15.935531 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cnf5p" Feb 27 17:49:15 crc kubenswrapper[4751]: I0227 17:49:15.954069 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cnf5p"] Feb 27 17:49:16 crc kubenswrapper[4751]: I0227 17:49:16.035441 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ce11417-12ae-4a50-88b6-ee9979c28760-utilities\") pod \"community-operators-cnf5p\" (UID: \"6ce11417-12ae-4a50-88b6-ee9979c28760\") " pod="openshift-marketplace/community-operators-cnf5p" Feb 27 17:49:16 crc kubenswrapper[4751]: I0227 17:49:16.035515 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ce11417-12ae-4a50-88b6-ee9979c28760-catalog-content\") pod \"community-operators-cnf5p\" (UID: \"6ce11417-12ae-4a50-88b6-ee9979c28760\") " pod="openshift-marketplace/community-operators-cnf5p" Feb 27 17:49:16 crc kubenswrapper[4751]: I0227 17:49:16.035564 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zndjq\" (UniqueName: \"kubernetes.io/projected/6ce11417-12ae-4a50-88b6-ee9979c28760-kube-api-access-zndjq\") pod \"community-operators-cnf5p\" (UID: \"6ce11417-12ae-4a50-88b6-ee9979c28760\") " pod="openshift-marketplace/community-operators-cnf5p" Feb 27 17:49:16 crc kubenswrapper[4751]: I0227 17:49:16.139453 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ce11417-12ae-4a50-88b6-ee9979c28760-utilities\") pod \"community-operators-cnf5p\" (UID: \"6ce11417-12ae-4a50-88b6-ee9979c28760\") " pod="openshift-marketplace/community-operators-cnf5p" Feb 27 17:49:16 crc kubenswrapper[4751]: I0227 17:49:16.139581 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ce11417-12ae-4a50-88b6-ee9979c28760-catalog-content\") pod \"community-operators-cnf5p\" (UID: \"6ce11417-12ae-4a50-88b6-ee9979c28760\") " pod="openshift-marketplace/community-operators-cnf5p" Feb 27 17:49:16 crc kubenswrapper[4751]: I0227 17:49:16.139674 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zndjq\" (UniqueName: \"kubernetes.io/projected/6ce11417-12ae-4a50-88b6-ee9979c28760-kube-api-access-zndjq\") pod \"community-operators-cnf5p\" (UID: \"6ce11417-12ae-4a50-88b6-ee9979c28760\") " pod="openshift-marketplace/community-operators-cnf5p" Feb 27 17:49:16 crc kubenswrapper[4751]: I0227 17:49:16.140472 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ce11417-12ae-4a50-88b6-ee9979c28760-utilities\") pod \"community-operators-cnf5p\" (UID: \"6ce11417-12ae-4a50-88b6-ee9979c28760\") " pod="openshift-marketplace/community-operators-cnf5p" Feb 27 17:49:16 crc kubenswrapper[4751]: I0227 17:49:16.140689 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ce11417-12ae-4a50-88b6-ee9979c28760-catalog-content\") pod \"community-operators-cnf5p\" (UID: \"6ce11417-12ae-4a50-88b6-ee9979c28760\") " pod="openshift-marketplace/community-operators-cnf5p" Feb 27 17:49:16 crc kubenswrapper[4751]: I0227 17:49:16.165045 4751 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-zndjq\" (UniqueName: \"kubernetes.io/projected/6ce11417-12ae-4a50-88b6-ee9979c28760-kube-api-access-zndjq\") pod \"community-operators-cnf5p\" (UID: \"6ce11417-12ae-4a50-88b6-ee9979c28760\") " pod="openshift-marketplace/community-operators-cnf5p" Feb 27 17:49:16 crc kubenswrapper[4751]: I0227 17:49:16.262026 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cnf5p" Feb 27 17:49:16 crc kubenswrapper[4751]: I0227 17:49:16.591698 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cnf5p"] Feb 27 17:49:17 crc kubenswrapper[4751]: I0227 17:49:17.060597 4751 generic.go:334] "Generic (PLEG): container finished" podID="6ce11417-12ae-4a50-88b6-ee9979c28760" containerID="f3af6bc7fef182533fe6beae5522fcbd427721ac842f987c5fd52ab62bca2e14" exitCode=0 Feb 27 17:49:17 crc kubenswrapper[4751]: I0227 17:49:17.060649 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cnf5p" event={"ID":"6ce11417-12ae-4a50-88b6-ee9979c28760","Type":"ContainerDied","Data":"f3af6bc7fef182533fe6beae5522fcbd427721ac842f987c5fd52ab62bca2e14"} Feb 27 17:49:17 crc kubenswrapper[4751]: I0227 17:49:17.061792 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cnf5p" event={"ID":"6ce11417-12ae-4a50-88b6-ee9979c28760","Type":"ContainerStarted","Data":"cd431cb00389d2d48390fff0afadb07f082b6cc961064b2ade26f153a9618d89"} Feb 27 17:49:19 crc kubenswrapper[4751]: I0227 17:49:19.081905 4751 generic.go:334] "Generic (PLEG): container finished" podID="6ce11417-12ae-4a50-88b6-ee9979c28760" containerID="ee6fa25d48c3a1116ddc5a81ace62f41aafb28792c228f7f9398bde5c6b2399b" exitCode=0 Feb 27 17:49:19 crc kubenswrapper[4751]: I0227 17:49:19.081965 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cnf5p" event={"ID":"6ce11417-12ae-4a50-88b6-ee9979c28760","Type":"ContainerDied","Data":"ee6fa25d48c3a1116ddc5a81ace62f41aafb28792c228f7f9398bde5c6b2399b"} Feb 27 17:49:20 crc kubenswrapper[4751]: I0227 17:49:20.096040 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cnf5p" event={"ID":"6ce11417-12ae-4a50-88b6-ee9979c28760","Type":"ContainerStarted","Data":"3101f971f2db6c0f58fb0abb010e93e8c83a491146666bda410beaf06ba47dae"} Feb 27 17:49:20 crc kubenswrapper[4751]: I0227 17:49:20.125380 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-cnf5p" podStartSLOduration=2.656140249 podStartE2EDuration="5.12536385s" podCreationTimestamp="2026-02-27 17:49:15 +0000 UTC" firstStartedPulling="2026-02-27 17:49:17.06232922 +0000 UTC m=+5119.209343657" lastFinishedPulling="2026-02-27 17:49:19.531552771 +0000 UTC m=+5121.678567258" observedRunningTime="2026-02-27 17:49:20.123088561 +0000 UTC m=+5122.270103018" watchObservedRunningTime="2026-02-27 17:49:20.12536385 +0000 UTC m=+5122.272378297" Feb 27 17:49:22 crc kubenswrapper[4751]: E0227 17:49:22.523375 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536908-8tlz6" podUID="d75ed18f-9cc7-4c8f-9856-130e8b5932f8" Feb 27 17:49:24 crc kubenswrapper[4751]: E0227 17:49:24.523698 4751 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:49:26 crc kubenswrapper[4751]: I0227 17:49:26.262997 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-cnf5p" Feb 27 17:49:26 crc kubenswrapper[4751]: I0227 17:49:26.263100 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-cnf5p" Feb 27 17:49:26 crc kubenswrapper[4751]: I0227 17:49:26.347173 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-cnf5p" Feb 27 17:49:27 crc kubenswrapper[4751]: I0227 17:49:27.241178 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-cnf5p" Feb 27 17:49:27 crc kubenswrapper[4751]: I0227 17:49:27.313594 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cnf5p"] Feb 27 17:49:28 crc kubenswrapper[4751]: I0227 17:49:28.918348 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:49:28 crc kubenswrapper[4751]: I0227 17:49:28.918483 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:49:29 crc kubenswrapper[4751]: I0227 17:49:29.191553 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-cnf5p" podUID="6ce11417-12ae-4a50-88b6-ee9979c28760" containerName="registry-server" containerID="cri-o://3101f971f2db6c0f58fb0abb010e93e8c83a491146666bda410beaf06ba47dae" gracePeriod=2 Feb 27 17:49:30 crc kubenswrapper[4751]: I0227 17:49:30.205701 4751 generic.go:334] "Generic (PLEG): container finished" podID="6ce11417-12ae-4a50-88b6-ee9979c28760" containerID="3101f971f2db6c0f58fb0abb010e93e8c83a491146666bda410beaf06ba47dae" exitCode=0 Feb 27 17:49:30 crc kubenswrapper[4751]: I0227 17:49:30.205832 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cnf5p" event={"ID":"6ce11417-12ae-4a50-88b6-ee9979c28760","Type":"ContainerDied","Data":"3101f971f2db6c0f58fb0abb010e93e8c83a491146666bda410beaf06ba47dae"} Feb 27 17:49:30 crc kubenswrapper[4751]: I0227 17:49:30.312105 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cnf5p" Feb 27 17:49:30 crc kubenswrapper[4751]: I0227 17:49:30.406436 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ce11417-12ae-4a50-88b6-ee9979c28760-catalog-content\") pod \"6ce11417-12ae-4a50-88b6-ee9979c28760\" (UID: \"6ce11417-12ae-4a50-88b6-ee9979c28760\") " Feb 27 17:49:30 crc kubenswrapper[4751]: I0227 17:49:30.406481 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ce11417-12ae-4a50-88b6-ee9979c28760-utilities\") pod \"6ce11417-12ae-4a50-88b6-ee9979c28760\" (UID: \"6ce11417-12ae-4a50-88b6-ee9979c28760\") " Feb 27 17:49:30 crc kubenswrapper[4751]: I0227 17:49:30.406563 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zndjq\" (UniqueName: \"kubernetes.io/projected/6ce11417-12ae-4a50-88b6-ee9979c28760-kube-api-access-zndjq\") pod \"6ce11417-12ae-4a50-88b6-ee9979c28760\" (UID: \"6ce11417-12ae-4a50-88b6-ee9979c28760\") " Feb 27 17:49:30 crc kubenswrapper[4751]: I0227 17:49:30.408972 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ce11417-12ae-4a50-88b6-ee9979c28760-utilities" (OuterVolumeSpecName: "utilities") pod "6ce11417-12ae-4a50-88b6-ee9979c28760" (UID: "6ce11417-12ae-4a50-88b6-ee9979c28760"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:49:30 crc kubenswrapper[4751]: I0227 17:49:30.417997 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ce11417-12ae-4a50-88b6-ee9979c28760-kube-api-access-zndjq" (OuterVolumeSpecName: "kube-api-access-zndjq") pod "6ce11417-12ae-4a50-88b6-ee9979c28760" (UID: "6ce11417-12ae-4a50-88b6-ee9979c28760"). InnerVolumeSpecName "kube-api-access-zndjq". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:49:30 crc kubenswrapper[4751]: I0227 17:49:30.479911 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ce11417-12ae-4a50-88b6-ee9979c28760-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6ce11417-12ae-4a50-88b6-ee9979c28760" (UID: "6ce11417-12ae-4a50-88b6-ee9979c28760"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:49:30 crc kubenswrapper[4751]: I0227 17:49:30.508657 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ce11417-12ae-4a50-88b6-ee9979c28760-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 17:49:30 crc kubenswrapper[4751]: I0227 17:49:30.508719 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ce11417-12ae-4a50-88b6-ee9979c28760-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 17:49:30 crc kubenswrapper[4751]: I0227 17:49:30.508747 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zndjq\" (UniqueName: \"kubernetes.io/projected/6ce11417-12ae-4a50-88b6-ee9979c28760-kube-api-access-zndjq\") on node \"crc\" DevicePath \"\"" Feb 27 17:49:31 crc kubenswrapper[4751]: I0227 17:49:31.215322 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cnf5p" event={"ID":"6ce11417-12ae-4a50-88b6-ee9979c28760","Type":"ContainerDied","Data":"cd431cb00389d2d48390fff0afadb07f082b6cc961064b2ade26f153a9618d89"} Feb 27 17:49:31 crc kubenswrapper[4751]: I0227 17:49:31.215629 4751 scope.go:117] "RemoveContainer" containerID="3101f971f2db6c0f58fb0abb010e93e8c83a491146666bda410beaf06ba47dae" Feb 27 17:49:31 crc kubenswrapper[4751]: I0227 17:49:31.215387 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cnf5p" Feb 27 17:49:31 crc kubenswrapper[4751]: I0227 17:49:31.234295 4751 scope.go:117] "RemoveContainer" containerID="ee6fa25d48c3a1116ddc5a81ace62f41aafb28792c228f7f9398bde5c6b2399b" Feb 27 17:49:31 crc kubenswrapper[4751]: I0227 17:49:31.242038 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cnf5p"] Feb 27 17:49:31 crc kubenswrapper[4751]: I0227 17:49:31.249505 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-cnf5p"] Feb 27 17:49:31 crc kubenswrapper[4751]: I0227 17:49:31.521646 4751 scope.go:117] "RemoveContainer" containerID="f3af6bc7fef182533fe6beae5522fcbd427721ac842f987c5fd52ab62bca2e14" Feb 27 17:49:32 crc kubenswrapper[4751]: I0227 17:49:32.541699 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ce11417-12ae-4a50-88b6-ee9979c28760" path="/var/lib/kubelet/pods/6ce11417-12ae-4a50-88b6-ee9979c28760/volumes" Feb 27 17:49:37 crc kubenswrapper[4751]: I0227 17:49:37.273016 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536908-8tlz6" event={"ID":"d75ed18f-9cc7-4c8f-9856-130e8b5932f8","Type":"ContainerStarted","Data":"0a2bdbcce9bd97aef4be3ddbc8bcdbb443bf0cd04528eeacf1e4292b7c0a3762"} Feb 27 17:49:37 crc kubenswrapper[4751]: I0227 17:49:37.303117 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-infra/auto-csr-approver-29536908-8tlz6" podStartSLOduration=1.3442327920000001 podStartE2EDuration="1m37.303089738s" podCreationTimestamp="2026-02-27 17:48:00 +0000 UTC" firstStartedPulling="2026-02-27 17:48:00.885014702 +0000 UTC m=+5043.032029149" lastFinishedPulling="2026-02-27 17:49:36.843871638 +0000 UTC m=+5138.990886095" observedRunningTime="2026-02-27 17:49:37.293460877 +0000 UTC m=+5139.440475364" watchObservedRunningTime="2026-02-27 17:49:37.303089738 +0000 UTC m=+5139.450104215" Feb 27 17:49:38 crc kubenswrapper[4751]: I0227 17:49:38.292380 
4751 generic.go:334] "Generic (PLEG): container finished" podID="d75ed18f-9cc7-4c8f-9856-130e8b5932f8" containerID="0a2bdbcce9bd97aef4be3ddbc8bcdbb443bf0cd04528eeacf1e4292b7c0a3762" exitCode=0 Feb 27 17:49:38 crc kubenswrapper[4751]: I0227 17:49:38.292447 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536908-8tlz6" event={"ID":"d75ed18f-9cc7-4c8f-9856-130e8b5932f8","Type":"ContainerDied","Data":"0a2bdbcce9bd97aef4be3ddbc8bcdbb443bf0cd04528eeacf1e4292b7c0a3762"} Feb 27 17:49:38 crc kubenswrapper[4751]: E0227 17:49:38.532555 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" Feb 27 17:49:39 crc kubenswrapper[4751]: I0227 17:49:39.590238 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536908-8tlz6" Feb 27 17:49:39 crc kubenswrapper[4751]: I0227 17:49:39.782716 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5bzpf\" (UniqueName: \"kubernetes.io/projected/d75ed18f-9cc7-4c8f-9856-130e8b5932f8-kube-api-access-5bzpf\") pod \"d75ed18f-9cc7-4c8f-9856-130e8b5932f8\" (UID: \"d75ed18f-9cc7-4c8f-9856-130e8b5932f8\") " Feb 27 17:49:39 crc kubenswrapper[4751]: I0227 17:49:39.789652 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d75ed18f-9cc7-4c8f-9856-130e8b5932f8-kube-api-access-5bzpf" (OuterVolumeSpecName: "kube-api-access-5bzpf") pod "d75ed18f-9cc7-4c8f-9856-130e8b5932f8" (UID: "d75ed18f-9cc7-4c8f-9856-130e8b5932f8"). InnerVolumeSpecName "kube-api-access-5bzpf". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:49:39 crc kubenswrapper[4751]: I0227 17:49:39.885398 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5bzpf\" (UniqueName: \"kubernetes.io/projected/d75ed18f-9cc7-4c8f-9856-130e8b5932f8-kube-api-access-5bzpf\") on node \"crc\" DevicePath \"\"" Feb 27 17:49:40 crc kubenswrapper[4751]: I0227 17:49:40.313005 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536908-8tlz6" event={"ID":"d75ed18f-9cc7-4c8f-9856-130e8b5932f8","Type":"ContainerDied","Data":"732936145bf337d13f904cfd06c3a07178ad40eafe4e5749b38c9ec797c0b989"} Feb 27 17:49:40 crc kubenswrapper[4751]: I0227 17:49:40.313333 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="732936145bf337d13f904cfd06c3a07178ad40eafe4e5749b38c9ec797c0b989" Feb 27 17:49:40 crc kubenswrapper[4751]: I0227 17:49:40.313075 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536908-8tlz6" Feb 27 17:49:40 crc kubenswrapper[4751]: I0227 17:49:40.399029 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536902-ljzg6"] Feb 27 17:49:40 crc kubenswrapper[4751]: I0227 17:49:40.410599 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536902-ljzg6"] Feb 27 17:49:40 crc kubenswrapper[4751]: I0227 17:49:40.534747 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0a39a0b-091c-4aa0-b6e3-400c70578537" path="/var/lib/kubelet/pods/b0a39a0b-091c-4aa0-b6e3-400c70578537/volumes" Feb 27 17:49:52 crc kubenswrapper[4751]: I0227 17:49:52.401732 4751 scope.go:117] "RemoveContainer" containerID="f57fa567b1d9956af2b58c3c1617952b20c3b9d7afadb082e9f693c70f168fa2" Feb 27 17:49:52 crc kubenswrapper[4751]: I0227 17:49:52.453087 4751 generic.go:334] "Generic (PLEG): container finished" podID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" containerID="12b13674f4fcd487f8ec225c4dc5f8935bd1f3e6516dd84e58bb8a5eec8d3b8d" exitCode=0 Feb 27 17:49:52 crc kubenswrapper[4751]: I0227 17:49:52.453148 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" event={"ID":"bebb76fd-da0e-4c76-a3fe-1c31a40256fc","Type":"ContainerDied","Data":"12b13674f4fcd487f8ec225c4dc5f8935bd1f3e6516dd84e58bb8a5eec8d3b8d"} Feb 27 17:49:52 crc kubenswrapper[4751]: I0227 17:49:52.468144 4751 scope.go:117] "RemoveContainer" containerID="f70d73883f0010cfa2c91fa8333825ba74e886ac362593b2c70b10ecac4ba886" Feb 27 17:49:53 crc kubenswrapper[4751]: I0227 17:49:53.923745 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" Feb 27 17:49:54 crc kubenswrapper[4751]: I0227 17:49:54.043618 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jlxqx\" (UniqueName: \"kubernetes.io/projected/bebb76fd-da0e-4c76-a3fe-1c31a40256fc-kube-api-access-jlxqx\") pod \"bebb76fd-da0e-4c76-a3fe-1c31a40256fc\" (UID: \"bebb76fd-da0e-4c76-a3fe-1c31a40256fc\") " Feb 27 17:49:54 crc kubenswrapper[4751]: I0227 17:49:54.051186 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bebb76fd-da0e-4c76-a3fe-1c31a40256fc-kube-api-access-jlxqx" (OuterVolumeSpecName: "kube-api-access-jlxqx") pod "bebb76fd-da0e-4c76-a3fe-1c31a40256fc" (UID: "bebb76fd-da0e-4c76-a3fe-1c31a40256fc"). InnerVolumeSpecName "kube-api-access-jlxqx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:49:54 crc kubenswrapper[4751]: I0227 17:49:54.146103 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jlxqx\" (UniqueName: \"kubernetes.io/projected/bebb76fd-da0e-4c76-a3fe-1c31a40256fc-kube-api-access-jlxqx\") on node \"crc\" DevicePath \"\"" Feb 27 17:49:54 crc kubenswrapper[4751]: I0227 17:49:54.478735 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" event={"ID":"bebb76fd-da0e-4c76-a3fe-1c31a40256fc","Type":"ContainerDied","Data":"fe3bf064df12fd8544e528feafa8a344b80ecced8dccc568d73e2b85f03fa772"} Feb 27 17:49:54 crc kubenswrapper[4751]: I0227 17:49:54.478812 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe3bf064df12fd8544e528feafa8a344b80ecced8dccc568d73e2b85f03fa772" Feb 27 17:49:54 crc kubenswrapper[4751]: I0227 17:49:54.478811 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536898-x7x5b" Feb 27 17:49:55 crc kubenswrapper[4751]: I0227 17:49:55.010611 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536898-x7x5b"] Feb 27 17:49:55 crc kubenswrapper[4751]: I0227 17:49:55.019582 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536898-x7x5b"] Feb 27 17:49:56 crc kubenswrapper[4751]: I0227 17:49:56.537383 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" path="/var/lib/kubelet/pods/bebb76fd-da0e-4c76-a3fe-1c31a40256fc/volumes" Feb 27 17:49:58 crc kubenswrapper[4751]: I0227 17:49:58.917936 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:49:58 crc kubenswrapper[4751]: I0227 17:49:58.918304 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:49:58 crc kubenswrapper[4751]: I0227 17:49:58.918383 4751 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 17:49:58 crc kubenswrapper[4751]: I0227 17:49:58.919606 4751 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f6933fc985518da96693a8b2d200d4e49145e296c6cdf1cf2e8ca5bb1a0e9524"} pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 27 17:49:58 crc kubenswrapper[4751]: I0227 17:49:58.919722 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" containerID="cri-o://f6933fc985518da96693a8b2d200d4e49145e296c6cdf1cf2e8ca5bb1a0e9524" gracePeriod=600 Feb 27 17:49:59 crc kubenswrapper[4751]: I0227 17:49:59.535712 4751 generic.go:334] "Generic 
(PLEG): container finished" podID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerID="f6933fc985518da96693a8b2d200d4e49145e296c6cdf1cf2e8ca5bb1a0e9524" exitCode=0 Feb 27 17:49:59 crc kubenswrapper[4751]: I0227 17:49:59.535795 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerDied","Data":"f6933fc985518da96693a8b2d200d4e49145e296c6cdf1cf2e8ca5bb1a0e9524"} Feb 27 17:49:59 crc kubenswrapper[4751]: I0227 17:49:59.536069 4751 scope.go:117] "RemoveContainer" containerID="5f0350f523e65104c87d3bed92c6c0e810a4d7cb353201da82ad5dc581948ba6" Feb 27 17:50:00 crc kubenswrapper[4751]: I0227 17:50:00.157098 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536910-n5xg2"] Feb 27 17:50:00 crc kubenswrapper[4751]: E0227 17:50:00.157963 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ce11417-12ae-4a50-88b6-ee9979c28760" containerName="extract-content" Feb 27 17:50:00 crc kubenswrapper[4751]: I0227 17:50:00.157988 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ce11417-12ae-4a50-88b6-ee9979c28760" containerName="extract-content" Feb 27 17:50:00 crc kubenswrapper[4751]: E0227 17:50:00.158015 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ce11417-12ae-4a50-88b6-ee9979c28760" containerName="extract-utilities" Feb 27 17:50:00 crc kubenswrapper[4751]: I0227 17:50:00.158027 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ce11417-12ae-4a50-88b6-ee9979c28760" containerName="extract-utilities" Feb 27 17:50:00 crc kubenswrapper[4751]: E0227 17:50:00.158049 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d75ed18f-9cc7-4c8f-9856-130e8b5932f8" containerName="oc" Feb 27 17:50:00 crc kubenswrapper[4751]: I0227 17:50:00.158063 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="d75ed18f-9cc7-4c8f-9856-130e8b5932f8" containerName="oc" Feb 27 17:50:00 crc kubenswrapper[4751]: E0227 17:50:00.158077 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ce11417-12ae-4a50-88b6-ee9979c28760" containerName="registry-server" Feb 27 17:50:00 crc kubenswrapper[4751]: I0227 17:50:00.158088 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ce11417-12ae-4a50-88b6-ee9979c28760" containerName="registry-server" Feb 27 17:50:00 crc kubenswrapper[4751]: E0227 17:50:00.158110 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" containerName="oc" Feb 27 17:50:00 crc kubenswrapper[4751]: I0227 17:50:00.158122 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" containerName="oc" Feb 27 17:50:00 crc kubenswrapper[4751]: I0227 17:50:00.158381 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="bebb76fd-da0e-4c76-a3fe-1c31a40256fc" containerName="oc" Feb 27 17:50:00 crc kubenswrapper[4751]: I0227 17:50:00.158430 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="d75ed18f-9cc7-4c8f-9856-130e8b5932f8" containerName="oc" Feb 27 17:50:00 crc kubenswrapper[4751]: I0227 17:50:00.158451 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ce11417-12ae-4a50-88b6-ee9979c28760" containerName="registry-server" Feb 27 17:50:00 crc kubenswrapper[4751]: I0227 17:50:00.159127 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536910-n5xg2" Feb 27 17:50:00 crc kubenswrapper[4751]: I0227 17:50:00.162238 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:50:00 crc kubenswrapper[4751]: I0227 17:50:00.162266 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:50:00 crc kubenswrapper[4751]: I0227 17:50:00.162503 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:50:00 crc kubenswrapper[4751]: I0227 17:50:00.166687 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536910-n5xg2"] Feb 27 17:50:00 crc kubenswrapper[4751]: I0227 17:50:00.251467 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mbqcl\" (UniqueName: \"kubernetes.io/projected/d8e60c4e-c1a0-4310-8275-865935c980a6-kube-api-access-mbqcl\") pod \"auto-csr-approver-29536910-n5xg2\" (UID: \"d8e60c4e-c1a0-4310-8275-865935c980a6\") " pod="openshift-infra/auto-csr-approver-29536910-n5xg2" Feb 27 17:50:00 crc kubenswrapper[4751]: I0227 17:50:00.353927 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mbqcl\" (UniqueName: \"kubernetes.io/projected/d8e60c4e-c1a0-4310-8275-865935c980a6-kube-api-access-mbqcl\") pod \"auto-csr-approver-29536910-n5xg2\" (UID: \"d8e60c4e-c1a0-4310-8275-865935c980a6\") " pod="openshift-infra/auto-csr-approver-29536910-n5xg2" Feb 27 17:50:00 crc kubenswrapper[4751]: I0227 17:50:00.401982 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mbqcl\" (UniqueName: \"kubernetes.io/projected/d8e60c4e-c1a0-4310-8275-865935c980a6-kube-api-access-mbqcl\") pod \"auto-csr-approver-29536910-n5xg2\" (UID: \"d8e60c4e-c1a0-4310-8275-865935c980a6\") " pod="openshift-infra/auto-csr-approver-29536910-n5xg2" Feb 27 17:50:00 crc kubenswrapper[4751]: I0227 17:50:00.495949 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536910-n5xg2" Feb 27 17:50:00 crc kubenswrapper[4751]: I0227 17:50:00.587588 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerStarted","Data":"0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f"} Feb 27 17:50:01 crc kubenswrapper[4751]: I0227 17:50:01.160473 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536910-n5xg2"] Feb 27 17:50:01 crc kubenswrapper[4751]: W0227 17:50:01.163653 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd8e60c4e_c1a0_4310_8275_865935c980a6.slice/crio-930fff4ecc9f3ee43beab194f564c080b3937f0c484786502e65dc3ea3498981 WatchSource:0}: Error finding container 930fff4ecc9f3ee43beab194f564c080b3937f0c484786502e65dc3ea3498981: Status 404 returned error can't find the container with id 930fff4ecc9f3ee43beab194f564c080b3937f0c484786502e65dc3ea3498981 Feb 27 17:50:01 crc kubenswrapper[4751]: I0227 17:50:01.599972 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536910-n5xg2" event={"ID":"d8e60c4e-c1a0-4310-8275-865935c980a6","Type":"ContainerStarted","Data":"930fff4ecc9f3ee43beab194f564c080b3937f0c484786502e65dc3ea3498981"} Feb 27 17:50:03 crc kubenswrapper[4751]: I0227 17:50:03.626141 4751 generic.go:334] "Generic (PLEG): container finished" podID="d8e60c4e-c1a0-4310-8275-865935c980a6" containerID="4056bc4f102d5da178a8e0031251e7fea1461aa51dca8d2a44f74bc7b2f9c8be" exitCode=0 Feb 27 17:50:03 crc kubenswrapper[4751]: I0227 17:50:03.626442 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536910-n5xg2" event={"ID":"d8e60c4e-c1a0-4310-8275-865935c980a6","Type":"ContainerDied","Data":"4056bc4f102d5da178a8e0031251e7fea1461aa51dca8d2a44f74bc7b2f9c8be"} Feb 27 17:50:05 crc kubenswrapper[4751]: I0227 17:50:05.011682 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536910-n5xg2" Feb 27 17:50:05 crc kubenswrapper[4751]: I0227 17:50:05.151514 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mbqcl\" (UniqueName: \"kubernetes.io/projected/d8e60c4e-c1a0-4310-8275-865935c980a6-kube-api-access-mbqcl\") pod \"d8e60c4e-c1a0-4310-8275-865935c980a6\" (UID: \"d8e60c4e-c1a0-4310-8275-865935c980a6\") " Feb 27 17:50:05 crc kubenswrapper[4751]: I0227 17:50:05.172593 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8e60c4e-c1a0-4310-8275-865935c980a6-kube-api-access-mbqcl" (OuterVolumeSpecName: "kube-api-access-mbqcl") pod "d8e60c4e-c1a0-4310-8275-865935c980a6" (UID: "d8e60c4e-c1a0-4310-8275-865935c980a6"). InnerVolumeSpecName "kube-api-access-mbqcl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:50:05 crc kubenswrapper[4751]: I0227 17:50:05.253651 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mbqcl\" (UniqueName: \"kubernetes.io/projected/d8e60c4e-c1a0-4310-8275-865935c980a6-kube-api-access-mbqcl\") on node \"crc\" DevicePath \"\"" Feb 27 17:50:05 crc kubenswrapper[4751]: I0227 17:50:05.647963 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536910-n5xg2" event={"ID":"d8e60c4e-c1a0-4310-8275-865935c980a6","Type":"ContainerDied","Data":"930fff4ecc9f3ee43beab194f564c080b3937f0c484786502e65dc3ea3498981"} Feb 27 17:50:05 crc kubenswrapper[4751]: I0227 17:50:05.648035 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="930fff4ecc9f3ee43beab194f564c080b3937f0c484786502e65dc3ea3498981" Feb 27 17:50:05 crc kubenswrapper[4751]: I0227 17:50:05.648116 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536910-n5xg2" Feb 27 17:50:06 crc kubenswrapper[4751]: I0227 17:50:06.118142 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536904-bp2m9"] Feb 27 17:50:06 crc kubenswrapper[4751]: I0227 17:50:06.133227 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536904-bp2m9"] Feb 27 17:50:06 crc kubenswrapper[4751]: I0227 17:50:06.537565 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1e31907-3659-412d-8af8-69bd2998180e" path="/var/lib/kubelet/pods/d1e31907-3659-412d-8af8-69bd2998180e/volumes" Feb 27 17:51:52 crc kubenswrapper[4751]: I0227 17:51:52.610131 4751 scope.go:117] "RemoveContainer" containerID="65c481300c7a8c3e70a97cf38785d54403cdeb826ffe23600201431ec01cf0e1" Feb 27 17:52:00 crc kubenswrapper[4751]: I0227 17:52:00.169391 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536912-mbp9n"] Feb 27 17:52:00 crc kubenswrapper[4751]: E0227 17:52:00.170661 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8e60c4e-c1a0-4310-8275-865935c980a6" containerName="oc" Feb 27 17:52:00 crc kubenswrapper[4751]: I0227 17:52:00.170692 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8e60c4e-c1a0-4310-8275-865935c980a6" containerName="oc" Feb 27 17:52:00 crc kubenswrapper[4751]: I0227 17:52:00.171018 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8e60c4e-c1a0-4310-8275-865935c980a6" containerName="oc" Feb 27 17:52:00 crc kubenswrapper[4751]: I0227 17:52:00.171890 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536912-mbp9n" Feb 27 17:52:00 crc kubenswrapper[4751]: I0227 17:52:00.176196 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:52:00 crc kubenswrapper[4751]: I0227 17:52:00.176762 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:52:00 crc kubenswrapper[4751]: I0227 17:52:00.177085 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:52:00 crc kubenswrapper[4751]: I0227 17:52:00.179437 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536912-mbp9n"] Feb 27 17:52:00 crc kubenswrapper[4751]: I0227 17:52:00.206378 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6rvj\" (UniqueName: \"kubernetes.io/projected/ca69b365-fbc1-4665-9b8b-86ab580acc22-kube-api-access-z6rvj\") pod \"auto-csr-approver-29536912-mbp9n\" (UID: \"ca69b365-fbc1-4665-9b8b-86ab580acc22\") " pod="openshift-infra/auto-csr-approver-29536912-mbp9n" Feb 27 17:52:00 crc kubenswrapper[4751]: I0227 17:52:00.307516 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6rvj\" (UniqueName: \"kubernetes.io/projected/ca69b365-fbc1-4665-9b8b-86ab580acc22-kube-api-access-z6rvj\") pod \"auto-csr-approver-29536912-mbp9n\" (UID: \"ca69b365-fbc1-4665-9b8b-86ab580acc22\") " pod="openshift-infra/auto-csr-approver-29536912-mbp9n" Feb 27 17:52:00 crc kubenswrapper[4751]: I0227 17:52:00.328241 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6rvj\" (UniqueName: \"kubernetes.io/projected/ca69b365-fbc1-4665-9b8b-86ab580acc22-kube-api-access-z6rvj\") pod \"auto-csr-approver-29536912-mbp9n\" (UID: \"ca69b365-fbc1-4665-9b8b-86ab580acc22\") " pod="openshift-infra/auto-csr-approver-29536912-mbp9n" Feb 27 17:52:00 crc kubenswrapper[4751]: I0227 17:52:00.506960 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536912-mbp9n" Feb 27 17:52:00 crc kubenswrapper[4751]: I0227 17:52:00.833750 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536912-mbp9n"] Feb 27 17:52:00 crc kubenswrapper[4751]: W0227 17:52:00.839028 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podca69b365_fbc1_4665_9b8b_86ab580acc22.slice/crio-400c50f4aa36b7e7c2ddd236a671a3fa77987a432bc973eab86e0415b4ebd550 WatchSource:0}: Error finding container 400c50f4aa36b7e7c2ddd236a671a3fa77987a432bc973eab86e0415b4ebd550: Status 404 returned error can't find the container with id 400c50f4aa36b7e7c2ddd236a671a3fa77987a432bc973eab86e0415b4ebd550 Feb 27 17:52:00 crc kubenswrapper[4751]: I0227 17:52:00.841772 4751 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 27 17:52:01 crc kubenswrapper[4751]: I0227 17:52:01.170715 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536912-mbp9n" event={"ID":"ca69b365-fbc1-4665-9b8b-86ab580acc22","Type":"ContainerStarted","Data":"400c50f4aa36b7e7c2ddd236a671a3fa77987a432bc973eab86e0415b4ebd550"} Feb 27 17:52:02 crc kubenswrapper[4751]: E0227 17:52:02.897904 4751 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podca69b365_fbc1_4665_9b8b_86ab580acc22.slice/crio-0afe0469171a8b3a2c628bba9cca2b507ab62f3b75116c98814309cd6103dd5e.scope\": RecentStats: unable to find data in memory cache]" Feb 27 17:52:03 crc kubenswrapper[4751]: I0227 17:52:03.193737 4751 generic.go:334] "Generic (PLEG): container finished" podID="ca69b365-fbc1-4665-9b8b-86ab580acc22" containerID="0afe0469171a8b3a2c628bba9cca2b507ab62f3b75116c98814309cd6103dd5e" exitCode=0 Feb 27 17:52:03 crc kubenswrapper[4751]: I0227 17:52:03.193810 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536912-mbp9n" event={"ID":"ca69b365-fbc1-4665-9b8b-86ab580acc22","Type":"ContainerDied","Data":"0afe0469171a8b3a2c628bba9cca2b507ab62f3b75116c98814309cd6103dd5e"} Feb 27 17:52:04 crc kubenswrapper[4751]: I0227 17:52:04.668960 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536912-mbp9n" Feb 27 17:52:04 crc kubenswrapper[4751]: I0227 17:52:04.682102 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z6rvj\" (UniqueName: \"kubernetes.io/projected/ca69b365-fbc1-4665-9b8b-86ab580acc22-kube-api-access-z6rvj\") pod \"ca69b365-fbc1-4665-9b8b-86ab580acc22\" (UID: \"ca69b365-fbc1-4665-9b8b-86ab580acc22\") " Feb 27 17:52:04 crc kubenswrapper[4751]: I0227 17:52:04.688349 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca69b365-fbc1-4665-9b8b-86ab580acc22-kube-api-access-z6rvj" (OuterVolumeSpecName: "kube-api-access-z6rvj") pod "ca69b365-fbc1-4665-9b8b-86ab580acc22" (UID: "ca69b365-fbc1-4665-9b8b-86ab580acc22"). InnerVolumeSpecName "kube-api-access-z6rvj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:52:04 crc kubenswrapper[4751]: I0227 17:52:04.783488 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z6rvj\" (UniqueName: \"kubernetes.io/projected/ca69b365-fbc1-4665-9b8b-86ab580acc22-kube-api-access-z6rvj\") on node \"crc\" DevicePath \"\"" Feb 27 17:52:05 crc kubenswrapper[4751]: I0227 17:52:05.213695 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536912-mbp9n" event={"ID":"ca69b365-fbc1-4665-9b8b-86ab580acc22","Type":"ContainerDied","Data":"400c50f4aa36b7e7c2ddd236a671a3fa77987a432bc973eab86e0415b4ebd550"} Feb 27 17:52:05 crc kubenswrapper[4751]: I0227 17:52:05.213756 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="400c50f4aa36b7e7c2ddd236a671a3fa77987a432bc973eab86e0415b4ebd550" Feb 27 17:52:05 crc kubenswrapper[4751]: I0227 17:52:05.213766 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536912-mbp9n" Feb 27 17:52:05 crc kubenswrapper[4751]: I0227 17:52:05.756942 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536906-962gp"] Feb 27 17:52:05 crc kubenswrapper[4751]: I0227 17:52:05.766966 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536906-962gp"] Feb 27 17:52:06 crc kubenswrapper[4751]: I0227 17:52:06.537451 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a0d3c7f-3fd8-462c-8ee9-93d7646a154c" path="/var/lib/kubelet/pods/0a0d3c7f-3fd8-462c-8ee9-93d7646a154c/volumes" Feb 27 17:52:07 crc kubenswrapper[4751]: I0227 17:52:07.940517 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2lpgh"] Feb 27 17:52:07 crc kubenswrapper[4751]: E0227 17:52:07.941459 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca69b365-fbc1-4665-9b8b-86ab580acc22" containerName="oc" Feb 27 17:52:07 crc kubenswrapper[4751]: I0227 17:52:07.941477 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca69b365-fbc1-4665-9b8b-86ab580acc22" containerName="oc" Feb 27 17:52:07 crc kubenswrapper[4751]: I0227 17:52:07.941745 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca69b365-fbc1-4665-9b8b-86ab580acc22" containerName="oc" Feb 27 17:52:07 crc kubenswrapper[4751]: I0227 17:52:07.943440 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2lpgh" Feb 27 17:52:07 crc kubenswrapper[4751]: I0227 17:52:07.949500 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2lpgh"] Feb 27 17:52:08 crc kubenswrapper[4751]: I0227 17:52:08.036148 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b566ef69-a638-4547-b745-5aa46aff96c8-utilities\") pod \"redhat-operators-2lpgh\" (UID: \"b566ef69-a638-4547-b745-5aa46aff96c8\") " pod="openshift-marketplace/redhat-operators-2lpgh" Feb 27 17:52:08 crc kubenswrapper[4751]: I0227 17:52:08.036356 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qlfs8\" (UniqueName: \"kubernetes.io/projected/b566ef69-a638-4547-b745-5aa46aff96c8-kube-api-access-qlfs8\") pod \"redhat-operators-2lpgh\" (UID: \"b566ef69-a638-4547-b745-5aa46aff96c8\") " pod="openshift-marketplace/redhat-operators-2lpgh" Feb 27 17:52:08 crc kubenswrapper[4751]: I0227 17:52:08.036467 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b566ef69-a638-4547-b745-5aa46aff96c8-catalog-content\") pod \"redhat-operators-2lpgh\" (UID: \"b566ef69-a638-4547-b745-5aa46aff96c8\") " pod="openshift-marketplace/redhat-operators-2lpgh" Feb 27 17:52:08 crc kubenswrapper[4751]: I0227 17:52:08.139575 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlfs8\" (UniqueName: \"kubernetes.io/projected/b566ef69-a638-4547-b745-5aa46aff96c8-kube-api-access-qlfs8\") pod \"redhat-operators-2lpgh\" (UID: \"b566ef69-a638-4547-b745-5aa46aff96c8\") " pod="openshift-marketplace/redhat-operators-2lpgh" Feb 27 17:52:08 crc kubenswrapper[4751]: I0227 17:52:08.139707 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b566ef69-a638-4547-b745-5aa46aff96c8-catalog-content\") pod \"redhat-operators-2lpgh\" (UID: \"b566ef69-a638-4547-b745-5aa46aff96c8\") " pod="openshift-marketplace/redhat-operators-2lpgh" Feb 27 17:52:08 crc kubenswrapper[4751]: I0227 17:52:08.139872 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b566ef69-a638-4547-b745-5aa46aff96c8-utilities\") pod \"redhat-operators-2lpgh\" (UID: \"b566ef69-a638-4547-b745-5aa46aff96c8\") " pod="openshift-marketplace/redhat-operators-2lpgh" Feb 27 17:52:08 crc kubenswrapper[4751]: I0227 17:52:08.140688 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b566ef69-a638-4547-b745-5aa46aff96c8-utilities\") pod \"redhat-operators-2lpgh\" (UID: \"b566ef69-a638-4547-b745-5aa46aff96c8\") " pod="openshift-marketplace/redhat-operators-2lpgh" Feb 27 17:52:08 crc kubenswrapper[4751]: I0227 17:52:08.140750 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b566ef69-a638-4547-b745-5aa46aff96c8-catalog-content\") pod \"redhat-operators-2lpgh\" (UID: \"b566ef69-a638-4547-b745-5aa46aff96c8\") " pod="openshift-marketplace/redhat-operators-2lpgh" Feb 27 17:52:08 crc kubenswrapper[4751]: I0227 17:52:08.170817 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-qlfs8\" (UniqueName: \"kubernetes.io/projected/b566ef69-a638-4547-b745-5aa46aff96c8-kube-api-access-qlfs8\") pod \"redhat-operators-2lpgh\" (UID: \"b566ef69-a638-4547-b745-5aa46aff96c8\") " pod="openshift-marketplace/redhat-operators-2lpgh" Feb 27 17:52:08 crc kubenswrapper[4751]: I0227 17:52:08.314144 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2lpgh" Feb 27 17:52:08 crc kubenswrapper[4751]: I0227 17:52:08.610539 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2lpgh"] Feb 27 17:52:09 crc kubenswrapper[4751]: I0227 17:52:09.259228 4751 generic.go:334] "Generic (PLEG): container finished" podID="b566ef69-a638-4547-b745-5aa46aff96c8" containerID="217bc0a7871366abaef9fb5ecf8b3fce19e0a54fee14df8074bd0ddc29613529" exitCode=0 Feb 27 17:52:09 crc kubenswrapper[4751]: I0227 17:52:09.259324 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2lpgh" event={"ID":"b566ef69-a638-4547-b745-5aa46aff96c8","Type":"ContainerDied","Data":"217bc0a7871366abaef9fb5ecf8b3fce19e0a54fee14df8074bd0ddc29613529"} Feb 27 17:52:09 crc kubenswrapper[4751]: I0227 17:52:09.259618 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2lpgh" event={"ID":"b566ef69-a638-4547-b745-5aa46aff96c8","Type":"ContainerStarted","Data":"d63c2d91f391a8efb45753b4358334d1348194df75125753e8cbd4379f98911d"} Feb 27 17:52:10 crc kubenswrapper[4751]: I0227 17:52:10.271944 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2lpgh" event={"ID":"b566ef69-a638-4547-b745-5aa46aff96c8","Type":"ContainerStarted","Data":"467bacb9ffde111083cb11d106f6d58e33037b337d42f5a57a85ac98da1370cf"} Feb 27 17:52:11 crc kubenswrapper[4751]: I0227 17:52:11.284052 4751 generic.go:334] "Generic (PLEG): container finished" podID="b566ef69-a638-4547-b745-5aa46aff96c8" containerID="467bacb9ffde111083cb11d106f6d58e33037b337d42f5a57a85ac98da1370cf" exitCode=0 Feb 27 17:52:11 crc kubenswrapper[4751]: I0227 17:52:11.284122 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2lpgh" event={"ID":"b566ef69-a638-4547-b745-5aa46aff96c8","Type":"ContainerDied","Data":"467bacb9ffde111083cb11d106f6d58e33037b337d42f5a57a85ac98da1370cf"} Feb 27 17:52:12 crc kubenswrapper[4751]: I0227 17:52:12.301074 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2lpgh" event={"ID":"b566ef69-a638-4547-b745-5aa46aff96c8","Type":"ContainerStarted","Data":"efa44d0a99d441d37204cc9d1ebaef389569c8d1f13d41cfc119ec8328b4fa0e"} Feb 27 17:52:12 crc kubenswrapper[4751]: I0227 17:52:12.333023 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2lpgh" podStartSLOduration=2.782805999 podStartE2EDuration="5.332994626s" podCreationTimestamp="2026-02-27 17:52:07 +0000 UTC" firstStartedPulling="2026-02-27 17:52:09.26169273 +0000 UTC m=+5291.408707187" lastFinishedPulling="2026-02-27 17:52:11.811881367 +0000 UTC m=+5293.958895814" observedRunningTime="2026-02-27 17:52:12.321817084 +0000 UTC m=+5294.468831551" watchObservedRunningTime="2026-02-27 17:52:12.332994626 +0000 UTC m=+5294.480009103" Feb 27 17:52:18 crc kubenswrapper[4751]: I0227 17:52:18.315389 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2lpgh" Feb 
27 17:52:18 crc kubenswrapper[4751]: I0227 17:52:18.315760 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2lpgh" Feb 27 17:52:19 crc kubenswrapper[4751]: I0227 17:52:19.377589 4751 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-2lpgh" podUID="b566ef69-a638-4547-b745-5aa46aff96c8" containerName="registry-server" probeResult="failure" output=< Feb 27 17:52:19 crc kubenswrapper[4751]: timeout: failed to connect service ":50051" within 1s Feb 27 17:52:19 crc kubenswrapper[4751]: > Feb 27 17:52:28 crc kubenswrapper[4751]: I0227 17:52:28.382751 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2lpgh" Feb 27 17:52:28 crc kubenswrapper[4751]: I0227 17:52:28.457926 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2lpgh" Feb 27 17:52:28 crc kubenswrapper[4751]: I0227 17:52:28.918036 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:52:28 crc kubenswrapper[4751]: I0227 17:52:28.918127 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:52:30 crc kubenswrapper[4751]: I0227 17:52:30.249267 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-jthcm"] Feb 27 17:52:30 crc kubenswrapper[4751]: I0227 17:52:30.252652 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jthcm" Feb 27 17:52:30 crc kubenswrapper[4751]: I0227 17:52:30.260639 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jthcm"] Feb 27 17:52:30 crc kubenswrapper[4751]: I0227 17:52:30.420136 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e2f6e96-00c1-4f5f-969b-c2dc1441a474-utilities\") pod \"certified-operators-jthcm\" (UID: \"4e2f6e96-00c1-4f5f-969b-c2dc1441a474\") " pod="openshift-marketplace/certified-operators-jthcm" Feb 27 17:52:30 crc kubenswrapper[4751]: I0227 17:52:30.420225 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8qnm\" (UniqueName: \"kubernetes.io/projected/4e2f6e96-00c1-4f5f-969b-c2dc1441a474-kube-api-access-c8qnm\") pod \"certified-operators-jthcm\" (UID: \"4e2f6e96-00c1-4f5f-969b-c2dc1441a474\") " pod="openshift-marketplace/certified-operators-jthcm" Feb 27 17:52:30 crc kubenswrapper[4751]: I0227 17:52:30.420312 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e2f6e96-00c1-4f5f-969b-c2dc1441a474-catalog-content\") pod \"certified-operators-jthcm\" (UID: \"4e2f6e96-00c1-4f5f-969b-c2dc1441a474\") " pod="openshift-marketplace/certified-operators-jthcm" Feb 27 17:52:30 crc kubenswrapper[4751]: I0227 17:52:30.522071 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e2f6e96-00c1-4f5f-969b-c2dc1441a474-catalog-content\") pod \"certified-operators-jthcm\" (UID: \"4e2f6e96-00c1-4f5f-969b-c2dc1441a474\") " pod="openshift-marketplace/certified-operators-jthcm" Feb 27 17:52:30 crc kubenswrapper[4751]: I0227 17:52:30.522336 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e2f6e96-00c1-4f5f-969b-c2dc1441a474-utilities\") pod \"certified-operators-jthcm\" (UID: \"4e2f6e96-00c1-4f5f-969b-c2dc1441a474\") " pod="openshift-marketplace/certified-operators-jthcm" Feb 27 17:52:30 crc kubenswrapper[4751]: I0227 17:52:30.522452 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c8qnm\" (UniqueName: \"kubernetes.io/projected/4e2f6e96-00c1-4f5f-969b-c2dc1441a474-kube-api-access-c8qnm\") pod \"certified-operators-jthcm\" (UID: \"4e2f6e96-00c1-4f5f-969b-c2dc1441a474\") " pod="openshift-marketplace/certified-operators-jthcm" Feb 27 17:52:30 crc kubenswrapper[4751]: I0227 17:52:30.522787 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e2f6e96-00c1-4f5f-969b-c2dc1441a474-catalog-content\") pod \"certified-operators-jthcm\" (UID: \"4e2f6e96-00c1-4f5f-969b-c2dc1441a474\") " pod="openshift-marketplace/certified-operators-jthcm" Feb 27 17:52:30 crc kubenswrapper[4751]: I0227 17:52:30.522804 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e2f6e96-00c1-4f5f-969b-c2dc1441a474-utilities\") pod \"certified-operators-jthcm\" (UID: \"4e2f6e96-00c1-4f5f-969b-c2dc1441a474\") " pod="openshift-marketplace/certified-operators-jthcm" Feb 27 17:52:30 crc kubenswrapper[4751]: I0227 17:52:30.563018 4751 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-c8qnm\" (UniqueName: \"kubernetes.io/projected/4e2f6e96-00c1-4f5f-969b-c2dc1441a474-kube-api-access-c8qnm\") pod \"certified-operators-jthcm\" (UID: \"4e2f6e96-00c1-4f5f-969b-c2dc1441a474\") " pod="openshift-marketplace/certified-operators-jthcm" Feb 27 17:52:30 crc kubenswrapper[4751]: I0227 17:52:30.589855 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jthcm" Feb 27 17:52:30 crc kubenswrapper[4751]: I0227 17:52:30.631493 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2lpgh"] Feb 27 17:52:30 crc kubenswrapper[4751]: I0227 17:52:30.631745 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2lpgh" podUID="b566ef69-a638-4547-b745-5aa46aff96c8" containerName="registry-server" containerID="cri-o://efa44d0a99d441d37204cc9d1ebaef389569c8d1f13d41cfc119ec8328b4fa0e" gracePeriod=2 Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.044911 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2lpgh" Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.077947 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jthcm"] Feb 27 17:52:31 crc kubenswrapper[4751]: W0227 17:52:31.078752 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4e2f6e96_00c1_4f5f_969b_c2dc1441a474.slice/crio-f01175ec24006c21229eeea8bf60bb892e07c52668fd370e4a25b807aae1bb1f WatchSource:0}: Error finding container f01175ec24006c21229eeea8bf60bb892e07c52668fd370e4a25b807aae1bb1f: Status 404 returned error can't find the container with id f01175ec24006c21229eeea8bf60bb892e07c52668fd370e4a25b807aae1bb1f Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.237277 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b566ef69-a638-4547-b745-5aa46aff96c8-catalog-content\") pod \"b566ef69-a638-4547-b745-5aa46aff96c8\" (UID: \"b566ef69-a638-4547-b745-5aa46aff96c8\") " Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.237816 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qlfs8\" (UniqueName: \"kubernetes.io/projected/b566ef69-a638-4547-b745-5aa46aff96c8-kube-api-access-qlfs8\") pod \"b566ef69-a638-4547-b745-5aa46aff96c8\" (UID: \"b566ef69-a638-4547-b745-5aa46aff96c8\") " Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.237847 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b566ef69-a638-4547-b745-5aa46aff96c8-utilities\") pod \"b566ef69-a638-4547-b745-5aa46aff96c8\" (UID: \"b566ef69-a638-4547-b745-5aa46aff96c8\") " Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.239085 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b566ef69-a638-4547-b745-5aa46aff96c8-utilities" (OuterVolumeSpecName: "utilities") pod "b566ef69-a638-4547-b745-5aa46aff96c8" (UID: "b566ef69-a638-4547-b745-5aa46aff96c8"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.245530 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b566ef69-a638-4547-b745-5aa46aff96c8-kube-api-access-qlfs8" (OuterVolumeSpecName: "kube-api-access-qlfs8") pod "b566ef69-a638-4547-b745-5aa46aff96c8" (UID: "b566ef69-a638-4547-b745-5aa46aff96c8"). InnerVolumeSpecName "kube-api-access-qlfs8". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.339355 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qlfs8\" (UniqueName: \"kubernetes.io/projected/b566ef69-a638-4547-b745-5aa46aff96c8-kube-api-access-qlfs8\") on node \"crc\" DevicePath \"\"" Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.339486 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b566ef69-a638-4547-b745-5aa46aff96c8-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.386929 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b566ef69-a638-4547-b745-5aa46aff96c8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b566ef69-a638-4547-b745-5aa46aff96c8" (UID: "b566ef69-a638-4547-b745-5aa46aff96c8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.441198 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b566ef69-a638-4547-b745-5aa46aff96c8-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.490280 4751 generic.go:334] "Generic (PLEG): container finished" podID="b566ef69-a638-4547-b745-5aa46aff96c8" containerID="efa44d0a99d441d37204cc9d1ebaef389569c8d1f13d41cfc119ec8328b4fa0e" exitCode=0 Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.490384 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2lpgh" Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.491497 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2lpgh" event={"ID":"b566ef69-a638-4547-b745-5aa46aff96c8","Type":"ContainerDied","Data":"efa44d0a99d441d37204cc9d1ebaef389569c8d1f13d41cfc119ec8328b4fa0e"} Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.491647 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2lpgh" event={"ID":"b566ef69-a638-4547-b745-5aa46aff96c8","Type":"ContainerDied","Data":"d63c2d91f391a8efb45753b4358334d1348194df75125753e8cbd4379f98911d"} Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.491683 4751 scope.go:117] "RemoveContainer" containerID="efa44d0a99d441d37204cc9d1ebaef389569c8d1f13d41cfc119ec8328b4fa0e" Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.494759 4751 generic.go:334] "Generic (PLEG): container finished" podID="4e2f6e96-00c1-4f5f-969b-c2dc1441a474" containerID="f85565d7f324d1506a0eb9c5ea65107c6bddc698d480276fe029a72f38191453" exitCode=0 Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.494798 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jthcm" event={"ID":"4e2f6e96-00c1-4f5f-969b-c2dc1441a474","Type":"ContainerDied","Data":"f85565d7f324d1506a0eb9c5ea65107c6bddc698d480276fe029a72f38191453"} Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.494822 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jthcm" event={"ID":"4e2f6e96-00c1-4f5f-969b-c2dc1441a474","Type":"ContainerStarted","Data":"f01175ec24006c21229eeea8bf60bb892e07c52668fd370e4a25b807aae1bb1f"} Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.537607 4751 scope.go:117] "RemoveContainer" containerID="467bacb9ffde111083cb11d106f6d58e33037b337d42f5a57a85ac98da1370cf" Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.546388 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2lpgh"] Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.561673 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2lpgh"] Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.574052 4751 scope.go:117] "RemoveContainer" containerID="217bc0a7871366abaef9fb5ecf8b3fce19e0a54fee14df8074bd0ddc29613529" Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.600156 4751 scope.go:117] "RemoveContainer" containerID="efa44d0a99d441d37204cc9d1ebaef389569c8d1f13d41cfc119ec8328b4fa0e" Feb 27 17:52:31 crc kubenswrapper[4751]: E0227 17:52:31.604084 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"efa44d0a99d441d37204cc9d1ebaef389569c8d1f13d41cfc119ec8328b4fa0e\": container with ID starting with efa44d0a99d441d37204cc9d1ebaef389569c8d1f13d41cfc119ec8328b4fa0e not found: ID does not exist" containerID="efa44d0a99d441d37204cc9d1ebaef389569c8d1f13d41cfc119ec8328b4fa0e" Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.604126 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"efa44d0a99d441d37204cc9d1ebaef389569c8d1f13d41cfc119ec8328b4fa0e"} err="failed to get container status \"efa44d0a99d441d37204cc9d1ebaef389569c8d1f13d41cfc119ec8328b4fa0e\": rpc error: code = NotFound desc = could not find container 
\"efa44d0a99d441d37204cc9d1ebaef389569c8d1f13d41cfc119ec8328b4fa0e\": container with ID starting with efa44d0a99d441d37204cc9d1ebaef389569c8d1f13d41cfc119ec8328b4fa0e not found: ID does not exist" Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.604154 4751 scope.go:117] "RemoveContainer" containerID="467bacb9ffde111083cb11d106f6d58e33037b337d42f5a57a85ac98da1370cf" Feb 27 17:52:31 crc kubenswrapper[4751]: E0227 17:52:31.604498 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"467bacb9ffde111083cb11d106f6d58e33037b337d42f5a57a85ac98da1370cf\": container with ID starting with 467bacb9ffde111083cb11d106f6d58e33037b337d42f5a57a85ac98da1370cf not found: ID does not exist" containerID="467bacb9ffde111083cb11d106f6d58e33037b337d42f5a57a85ac98da1370cf" Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.604559 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"467bacb9ffde111083cb11d106f6d58e33037b337d42f5a57a85ac98da1370cf"} err="failed to get container status \"467bacb9ffde111083cb11d106f6d58e33037b337d42f5a57a85ac98da1370cf\": rpc error: code = NotFound desc = could not find container \"467bacb9ffde111083cb11d106f6d58e33037b337d42f5a57a85ac98da1370cf\": container with ID starting with 467bacb9ffde111083cb11d106f6d58e33037b337d42f5a57a85ac98da1370cf not found: ID does not exist" Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.604601 4751 scope.go:117] "RemoveContainer" containerID="217bc0a7871366abaef9fb5ecf8b3fce19e0a54fee14df8074bd0ddc29613529" Feb 27 17:52:31 crc kubenswrapper[4751]: E0227 17:52:31.605355 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"217bc0a7871366abaef9fb5ecf8b3fce19e0a54fee14df8074bd0ddc29613529\": container with ID starting with 217bc0a7871366abaef9fb5ecf8b3fce19e0a54fee14df8074bd0ddc29613529 not found: ID does not exist" containerID="217bc0a7871366abaef9fb5ecf8b3fce19e0a54fee14df8074bd0ddc29613529" Feb 27 17:52:31 crc kubenswrapper[4751]: I0227 17:52:31.605608 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"217bc0a7871366abaef9fb5ecf8b3fce19e0a54fee14df8074bd0ddc29613529"} err="failed to get container status \"217bc0a7871366abaef9fb5ecf8b3fce19e0a54fee14df8074bd0ddc29613529\": rpc error: code = NotFound desc = could not find container \"217bc0a7871366abaef9fb5ecf8b3fce19e0a54fee14df8074bd0ddc29613529\": container with ID starting with 217bc0a7871366abaef9fb5ecf8b3fce19e0a54fee14df8074bd0ddc29613529 not found: ID does not exist" Feb 27 17:52:32 crc kubenswrapper[4751]: E0227 17:52:32.127276 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/certified-operator-index@sha256=625372062485d8ed1e4e84c388a7d036cb39c1b93d8c56dd3418fce0c028b62b/signature-2: status 500 (Internal Server Error)" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Feb 27 17:52:32 crc kubenswrapper[4751]: E0227 17:52:32.127978 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-c8qnm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-jthcm_openshift-marketplace(4e2f6e96-00c1-4f5f-969b-c2dc1441a474): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/certified-operator-index@sha256=625372062485d8ed1e4e84c388a7d036cb39c1b93d8c56dd3418fce0c028b62b/signature-2: status 500 (Internal Server Error)" logger="UnhandledError" Feb 27 17:52:32 crc kubenswrapper[4751]: E0227 17:52:32.129311 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/certified-operator-index@sha256=625372062485d8ed1e4e84c388a7d036cb39c1b93d8c56dd3418fce0c028b62b/signature-2: status 500 (Internal Server Error)\"" pod="openshift-marketplace/certified-operators-jthcm" podUID="4e2f6e96-00c1-4f5f-969b-c2dc1441a474" Feb 27 17:52:32 crc kubenswrapper[4751]: E0227 17:52:32.511220 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-jthcm" podUID="4e2f6e96-00c1-4f5f-969b-c2dc1441a474" Feb 27 17:52:32 crc kubenswrapper[4751]: I0227 17:52:32.548881 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b566ef69-a638-4547-b745-5aa46aff96c8" path="/var/lib/kubelet/pods/b566ef69-a638-4547-b745-5aa46aff96c8/volumes" Feb 27 17:52:44 crc kubenswrapper[4751]: E0227 17:52:44.971239 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/certified-operator-index@sha256=625372062485d8ed1e4e84c388a7d036cb39c1b93d8c56dd3418fce0c028b62b/signature-2: status 500 (Internal Server Error)" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Feb 27 17:52:44 crc kubenswrapper[4751]: E0227 17:52:44.972645 4751 
kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-c8qnm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-jthcm_openshift-marketplace(4e2f6e96-00c1-4f5f-969b-c2dc1441a474): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/certified-operator-index@sha256=625372062485d8ed1e4e84c388a7d036cb39c1b93d8c56dd3418fce0c028b62b/signature-2: status 500 (Internal Server Error)" logger="UnhandledError" Feb 27 17:52:44 crc kubenswrapper[4751]: E0227 17:52:44.974766 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/certified-operator-index@sha256=625372062485d8ed1e4e84c388a7d036cb39c1b93d8c56dd3418fce0c028b62b/signature-2: status 500 (Internal Server Error)\"" pod="openshift-marketplace/certified-operators-jthcm" podUID="4e2f6e96-00c1-4f5f-969b-c2dc1441a474" Feb 27 17:52:52 crc kubenswrapper[4751]: I0227 17:52:52.709254 4751 scope.go:117] "RemoveContainer" containerID="a4bfc45fda689afd626f10704cf897f8fe73389f97b44133213d03a39f5f013c" Feb 27 17:52:58 crc kubenswrapper[4751]: I0227 17:52:58.918350 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:52:58 crc kubenswrapper[4751]: I0227 17:52:58.919237 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:52:59 
crc kubenswrapper[4751]: E0227 17:52:59.525678 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-jthcm" podUID="4e2f6e96-00c1-4f5f-969b-c2dc1441a474" Feb 27 17:53:14 crc kubenswrapper[4751]: I0227 17:53:14.932729 4751 generic.go:334] "Generic (PLEG): container finished" podID="4e2f6e96-00c1-4f5f-969b-c2dc1441a474" containerID="b3952c38b8d455d87887d0ee15a23d6a7c0ff9e698aa36be3ebad67444891f01" exitCode=0 Feb 27 17:53:14 crc kubenswrapper[4751]: I0227 17:53:14.932858 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jthcm" event={"ID":"4e2f6e96-00c1-4f5f-969b-c2dc1441a474","Type":"ContainerDied","Data":"b3952c38b8d455d87887d0ee15a23d6a7c0ff9e698aa36be3ebad67444891f01"} Feb 27 17:53:15 crc kubenswrapper[4751]: I0227 17:53:15.951757 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jthcm" event={"ID":"4e2f6e96-00c1-4f5f-969b-c2dc1441a474","Type":"ContainerStarted","Data":"76b567ecd130362c10775a4a3cedbe1d0b49f5af269bc9537c4c7e75b7176d81"} Feb 27 17:53:15 crc kubenswrapper[4751]: I0227 17:53:15.985665 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-jthcm" podStartSLOduration=2.140782468 podStartE2EDuration="45.985642242s" podCreationTimestamp="2026-02-27 17:52:30 +0000 UTC" firstStartedPulling="2026-02-27 17:52:31.498253206 +0000 UTC m=+5313.645267693" lastFinishedPulling="2026-02-27 17:53:15.343113 +0000 UTC m=+5357.490127467" observedRunningTime="2026-02-27 17:53:15.979754098 +0000 UTC m=+5358.126768575" watchObservedRunningTime="2026-02-27 17:53:15.985642242 +0000 UTC m=+5358.132656699" Feb 27 17:53:20 crc kubenswrapper[4751]: I0227 17:53:20.590857 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-jthcm" Feb 27 17:53:20 crc kubenswrapper[4751]: I0227 17:53:20.592534 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-jthcm" Feb 27 17:53:20 crc kubenswrapper[4751]: I0227 17:53:20.669769 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-jthcm" Feb 27 17:53:21 crc kubenswrapper[4751]: I0227 17:53:21.055767 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-jthcm" Feb 27 17:53:21 crc kubenswrapper[4751]: I0227 17:53:21.114575 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jthcm"] Feb 27 17:53:22 crc kubenswrapper[4751]: I0227 17:53:22.234280 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-cwkkv/must-gather-c54qh"] Feb 27 17:53:22 crc kubenswrapper[4751]: E0227 17:53:22.234924 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b566ef69-a638-4547-b745-5aa46aff96c8" containerName="registry-server" Feb 27 17:53:22 crc kubenswrapper[4751]: I0227 17:53:22.234938 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b566ef69-a638-4547-b745-5aa46aff96c8" containerName="registry-server" Feb 27 17:53:22 crc kubenswrapper[4751]: E0227 17:53:22.234980 4751 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="b566ef69-a638-4547-b745-5aa46aff96c8" containerName="extract-utilities" Feb 27 17:53:22 crc kubenswrapper[4751]: I0227 17:53:22.234990 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b566ef69-a638-4547-b745-5aa46aff96c8" containerName="extract-utilities" Feb 27 17:53:22 crc kubenswrapper[4751]: E0227 17:53:22.235005 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b566ef69-a638-4547-b745-5aa46aff96c8" containerName="extract-content" Feb 27 17:53:22 crc kubenswrapper[4751]: I0227 17:53:22.235014 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="b566ef69-a638-4547-b745-5aa46aff96c8" containerName="extract-content" Feb 27 17:53:22 crc kubenswrapper[4751]: I0227 17:53:22.235187 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="b566ef69-a638-4547-b745-5aa46aff96c8" containerName="registry-server" Feb 27 17:53:22 crc kubenswrapper[4751]: I0227 17:53:22.236105 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-cwkkv/must-gather-c54qh" Feb 27 17:53:22 crc kubenswrapper[4751]: I0227 17:53:22.238933 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-cwkkv"/"kube-root-ca.crt" Feb 27 17:53:22 crc kubenswrapper[4751]: I0227 17:53:22.242896 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-cwkkv"/"openshift-service-ca.crt" Feb 27 17:53:22 crc kubenswrapper[4751]: I0227 17:53:22.244631 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-cwkkv/must-gather-c54qh"] Feb 27 17:53:22 crc kubenswrapper[4751]: I0227 17:53:22.248234 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-cwkkv"/"default-dockercfg-4qqtv" Feb 27 17:53:22 crc kubenswrapper[4751]: I0227 17:53:22.419332 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/8a6e75a1-1d82-49e3-8c4f-1b836cf5c533-must-gather-output\") pod \"must-gather-c54qh\" (UID: \"8a6e75a1-1d82-49e3-8c4f-1b836cf5c533\") " pod="openshift-must-gather-cwkkv/must-gather-c54qh" Feb 27 17:53:22 crc kubenswrapper[4751]: I0227 17:53:22.419543 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8rgb\" (UniqueName: \"kubernetes.io/projected/8a6e75a1-1d82-49e3-8c4f-1b836cf5c533-kube-api-access-q8rgb\") pod \"must-gather-c54qh\" (UID: \"8a6e75a1-1d82-49e3-8c4f-1b836cf5c533\") " pod="openshift-must-gather-cwkkv/must-gather-c54qh" Feb 27 17:53:22 crc kubenswrapper[4751]: I0227 17:53:22.520720 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/8a6e75a1-1d82-49e3-8c4f-1b836cf5c533-must-gather-output\") pod \"must-gather-c54qh\" (UID: \"8a6e75a1-1d82-49e3-8c4f-1b836cf5c533\") " pod="openshift-must-gather-cwkkv/must-gather-c54qh" Feb 27 17:53:22 crc kubenswrapper[4751]: I0227 17:53:22.520852 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8rgb\" (UniqueName: \"kubernetes.io/projected/8a6e75a1-1d82-49e3-8c4f-1b836cf5c533-kube-api-access-q8rgb\") pod \"must-gather-c54qh\" (UID: \"8a6e75a1-1d82-49e3-8c4f-1b836cf5c533\") " pod="openshift-must-gather-cwkkv/must-gather-c54qh" Feb 27 17:53:22 crc kubenswrapper[4751]: I0227 17:53:22.521113 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/8a6e75a1-1d82-49e3-8c4f-1b836cf5c533-must-gather-output\") pod \"must-gather-c54qh\" (UID: \"8a6e75a1-1d82-49e3-8c4f-1b836cf5c533\") " pod="openshift-must-gather-cwkkv/must-gather-c54qh" Feb 27 17:53:22 crc kubenswrapper[4751]: I0227 17:53:22.552892 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8rgb\" (UniqueName: \"kubernetes.io/projected/8a6e75a1-1d82-49e3-8c4f-1b836cf5c533-kube-api-access-q8rgb\") pod \"must-gather-c54qh\" (UID: \"8a6e75a1-1d82-49e3-8c4f-1b836cf5c533\") " pod="openshift-must-gather-cwkkv/must-gather-c54qh" Feb 27 17:53:22 crc kubenswrapper[4751]: I0227 17:53:22.553798 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-cwkkv/must-gather-c54qh" Feb 27 17:53:23 crc kubenswrapper[4751]: I0227 17:53:23.003992 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-cwkkv/must-gather-c54qh"] Feb 27 17:53:23 crc kubenswrapper[4751]: I0227 17:53:23.038757 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-cwkkv/must-gather-c54qh" event={"ID":"8a6e75a1-1d82-49e3-8c4f-1b836cf5c533","Type":"ContainerStarted","Data":"7733d02e309787ae906b91ac38c90ff638863cca45848770481ae07a082a3aa6"} Feb 27 17:53:23 crc kubenswrapper[4751]: I0227 17:53:23.038961 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-jthcm" podUID="4e2f6e96-00c1-4f5f-969b-c2dc1441a474" containerName="registry-server" containerID="cri-o://76b567ecd130362c10775a4a3cedbe1d0b49f5af269bc9537c4c7e75b7176d81" gracePeriod=2 Feb 27 17:53:23 crc kubenswrapper[4751]: I0227 17:53:23.443829 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jthcm" Feb 27 17:53:23 crc kubenswrapper[4751]: I0227 17:53:23.642061 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c8qnm\" (UniqueName: \"kubernetes.io/projected/4e2f6e96-00c1-4f5f-969b-c2dc1441a474-kube-api-access-c8qnm\") pod \"4e2f6e96-00c1-4f5f-969b-c2dc1441a474\" (UID: \"4e2f6e96-00c1-4f5f-969b-c2dc1441a474\") " Feb 27 17:53:23 crc kubenswrapper[4751]: I0227 17:53:23.642117 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e2f6e96-00c1-4f5f-969b-c2dc1441a474-utilities\") pod \"4e2f6e96-00c1-4f5f-969b-c2dc1441a474\" (UID: \"4e2f6e96-00c1-4f5f-969b-c2dc1441a474\") " Feb 27 17:53:23 crc kubenswrapper[4751]: I0227 17:53:23.642334 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e2f6e96-00c1-4f5f-969b-c2dc1441a474-catalog-content\") pod \"4e2f6e96-00c1-4f5f-969b-c2dc1441a474\" (UID: \"4e2f6e96-00c1-4f5f-969b-c2dc1441a474\") " Feb 27 17:53:23 crc kubenswrapper[4751]: I0227 17:53:23.643278 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e2f6e96-00c1-4f5f-969b-c2dc1441a474-utilities" (OuterVolumeSpecName: "utilities") pod "4e2f6e96-00c1-4f5f-969b-c2dc1441a474" (UID: "4e2f6e96-00c1-4f5f-969b-c2dc1441a474"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:53:23 crc kubenswrapper[4751]: I0227 17:53:23.649157 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e2f6e96-00c1-4f5f-969b-c2dc1441a474-kube-api-access-c8qnm" (OuterVolumeSpecName: "kube-api-access-c8qnm") pod "4e2f6e96-00c1-4f5f-969b-c2dc1441a474" (UID: "4e2f6e96-00c1-4f5f-969b-c2dc1441a474"). InnerVolumeSpecName "kube-api-access-c8qnm". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:53:23 crc kubenswrapper[4751]: I0227 17:53:23.708827 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e2f6e96-00c1-4f5f-969b-c2dc1441a474-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4e2f6e96-00c1-4f5f-969b-c2dc1441a474" (UID: "4e2f6e96-00c1-4f5f-969b-c2dc1441a474"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:53:23 crc kubenswrapper[4751]: I0227 17:53:23.744300 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e2f6e96-00c1-4f5f-969b-c2dc1441a474-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 17:53:23 crc kubenswrapper[4751]: I0227 17:53:23.744344 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c8qnm\" (UniqueName: \"kubernetes.io/projected/4e2f6e96-00c1-4f5f-969b-c2dc1441a474-kube-api-access-c8qnm\") on node \"crc\" DevicePath \"\"" Feb 27 17:53:23 crc kubenswrapper[4751]: I0227 17:53:23.744358 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e2f6e96-00c1-4f5f-969b-c2dc1441a474-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 17:53:24 crc kubenswrapper[4751]: I0227 17:53:24.051097 4751 generic.go:334] "Generic (PLEG): container finished" podID="4e2f6e96-00c1-4f5f-969b-c2dc1441a474" containerID="76b567ecd130362c10775a4a3cedbe1d0b49f5af269bc9537c4c7e75b7176d81" exitCode=0 Feb 27 17:53:24 crc kubenswrapper[4751]: I0227 17:53:24.051150 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jthcm" Feb 27 17:53:24 crc kubenswrapper[4751]: I0227 17:53:24.051145 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jthcm" event={"ID":"4e2f6e96-00c1-4f5f-969b-c2dc1441a474","Type":"ContainerDied","Data":"76b567ecd130362c10775a4a3cedbe1d0b49f5af269bc9537c4c7e75b7176d81"} Feb 27 17:53:24 crc kubenswrapper[4751]: I0227 17:53:24.051195 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jthcm" event={"ID":"4e2f6e96-00c1-4f5f-969b-c2dc1441a474","Type":"ContainerDied","Data":"f01175ec24006c21229eeea8bf60bb892e07c52668fd370e4a25b807aae1bb1f"} Feb 27 17:53:24 crc kubenswrapper[4751]: I0227 17:53:24.051216 4751 scope.go:117] "RemoveContainer" containerID="76b567ecd130362c10775a4a3cedbe1d0b49f5af269bc9537c4c7e75b7176d81" Feb 27 17:53:24 crc kubenswrapper[4751]: I0227 17:53:24.083685 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jthcm"] Feb 27 17:53:24 crc kubenswrapper[4751]: I0227 17:53:24.086755 4751 scope.go:117] "RemoveContainer" containerID="b3952c38b8d455d87887d0ee15a23d6a7c0ff9e698aa36be3ebad67444891f01" Feb 27 17:53:24 crc kubenswrapper[4751]: I0227 17:53:24.091446 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-jthcm"] Feb 27 17:53:24 crc kubenswrapper[4751]: I0227 17:53:24.105885 4751 scope.go:117] "RemoveContainer" containerID="f85565d7f324d1506a0eb9c5ea65107c6bddc698d480276fe029a72f38191453" Feb 27 17:53:24 crc kubenswrapper[4751]: I0227 17:53:24.156991 4751 scope.go:117] "RemoveContainer" containerID="76b567ecd130362c10775a4a3cedbe1d0b49f5af269bc9537c4c7e75b7176d81" Feb 27 17:53:24 crc kubenswrapper[4751]: E0227 17:53:24.157769 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76b567ecd130362c10775a4a3cedbe1d0b49f5af269bc9537c4c7e75b7176d81\": container with ID starting with 76b567ecd130362c10775a4a3cedbe1d0b49f5af269bc9537c4c7e75b7176d81 not found: ID does not exist" containerID="76b567ecd130362c10775a4a3cedbe1d0b49f5af269bc9537c4c7e75b7176d81" Feb 27 17:53:24 crc kubenswrapper[4751]: I0227 17:53:24.157816 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76b567ecd130362c10775a4a3cedbe1d0b49f5af269bc9537c4c7e75b7176d81"} err="failed to get container status \"76b567ecd130362c10775a4a3cedbe1d0b49f5af269bc9537c4c7e75b7176d81\": rpc error: code = NotFound desc = could not find container \"76b567ecd130362c10775a4a3cedbe1d0b49f5af269bc9537c4c7e75b7176d81\": container with ID starting with 76b567ecd130362c10775a4a3cedbe1d0b49f5af269bc9537c4c7e75b7176d81 not found: ID does not exist" Feb 27 17:53:24 crc kubenswrapper[4751]: I0227 17:53:24.157847 4751 scope.go:117] "RemoveContainer" containerID="b3952c38b8d455d87887d0ee15a23d6a7c0ff9e698aa36be3ebad67444891f01" Feb 27 17:53:24 crc kubenswrapper[4751]: E0227 17:53:24.158149 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3952c38b8d455d87887d0ee15a23d6a7c0ff9e698aa36be3ebad67444891f01\": container with ID starting with b3952c38b8d455d87887d0ee15a23d6a7c0ff9e698aa36be3ebad67444891f01 not found: ID does not exist" containerID="b3952c38b8d455d87887d0ee15a23d6a7c0ff9e698aa36be3ebad67444891f01" Feb 27 17:53:24 crc kubenswrapper[4751]: I0227 17:53:24.158182 4751 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3952c38b8d455d87887d0ee15a23d6a7c0ff9e698aa36be3ebad67444891f01"} err="failed to get container status \"b3952c38b8d455d87887d0ee15a23d6a7c0ff9e698aa36be3ebad67444891f01\": rpc error: code = NotFound desc = could not find container \"b3952c38b8d455d87887d0ee15a23d6a7c0ff9e698aa36be3ebad67444891f01\": container with ID starting with b3952c38b8d455d87887d0ee15a23d6a7c0ff9e698aa36be3ebad67444891f01 not found: ID does not exist" Feb 27 17:53:24 crc kubenswrapper[4751]: I0227 17:53:24.158204 4751 scope.go:117] "RemoveContainer" containerID="f85565d7f324d1506a0eb9c5ea65107c6bddc698d480276fe029a72f38191453" Feb 27 17:53:24 crc kubenswrapper[4751]: E0227 17:53:24.158439 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f85565d7f324d1506a0eb9c5ea65107c6bddc698d480276fe029a72f38191453\": container with ID starting with f85565d7f324d1506a0eb9c5ea65107c6bddc698d480276fe029a72f38191453 not found: ID does not exist" containerID="f85565d7f324d1506a0eb9c5ea65107c6bddc698d480276fe029a72f38191453" Feb 27 17:53:24 crc kubenswrapper[4751]: I0227 17:53:24.158464 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f85565d7f324d1506a0eb9c5ea65107c6bddc698d480276fe029a72f38191453"} err="failed to get container status \"f85565d7f324d1506a0eb9c5ea65107c6bddc698d480276fe029a72f38191453\": rpc error: code = NotFound desc = could not find container \"f85565d7f324d1506a0eb9c5ea65107c6bddc698d480276fe029a72f38191453\": container with ID starting with f85565d7f324d1506a0eb9c5ea65107c6bddc698d480276fe029a72f38191453 not found: ID does not exist" Feb 27 17:53:24 crc kubenswrapper[4751]: I0227 17:53:24.535118 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e2f6e96-00c1-4f5f-969b-c2dc1441a474" path="/var/lib/kubelet/pods/4e2f6e96-00c1-4f5f-969b-c2dc1441a474/volumes" Feb 27 17:53:28 crc kubenswrapper[4751]: I0227 17:53:28.918358 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 17:53:28 crc kubenswrapper[4751]: I0227 17:53:28.918721 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 17:53:28 crc kubenswrapper[4751]: I0227 17:53:28.918780 4751 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" Feb 27 17:53:28 crc kubenswrapper[4751]: I0227 17:53:28.919840 4751 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f"} pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 27 17:53:28 crc kubenswrapper[4751]: I0227 17:53:28.919973 4751 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" containerID="cri-o://0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" gracePeriod=600 Feb 27 17:53:29 crc kubenswrapper[4751]: I0227 17:53:29.106181 4751 generic.go:334] "Generic (PLEG): container finished" podID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" exitCode=0 Feb 27 17:53:29 crc kubenswrapper[4751]: I0227 17:53:29.106263 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerDied","Data":"0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f"} Feb 27 17:53:29 crc kubenswrapper[4751]: I0227 17:53:29.107289 4751 scope.go:117] "RemoveContainer" containerID="f6933fc985518da96693a8b2d200d4e49145e296c6cdf1cf2e8ca5bb1a0e9524" Feb 27 17:53:30 crc kubenswrapper[4751]: E0227 17:53:30.189355 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:53:31 crc kubenswrapper[4751]: I0227 17:53:31.130030 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-cwkkv/must-gather-c54qh" event={"ID":"8a6e75a1-1d82-49e3-8c4f-1b836cf5c533","Type":"ContainerStarted","Data":"dc968966fe9e198e27d3d584d5f63107477eff28bbee525a56c0baaee97d87da"} Feb 27 17:53:31 crc kubenswrapper[4751]: I0227 17:53:31.130168 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-cwkkv/must-gather-c54qh" event={"ID":"8a6e75a1-1d82-49e3-8c4f-1b836cf5c533","Type":"ContainerStarted","Data":"41abf478f145aa71ef6354f31c2c4d743c04fd5a756e90b6df776440ae11d531"} Feb 27 17:53:31 crc kubenswrapper[4751]: I0227 17:53:31.136087 4751 scope.go:117] "RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:53:31 crc kubenswrapper[4751]: E0227 17:53:31.136551 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:53:31 crc kubenswrapper[4751]: I0227 17:53:31.161735 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-cwkkv/must-gather-c54qh" podStartSLOduration=1.886135788 podStartE2EDuration="9.1617106s" podCreationTimestamp="2026-02-27 17:53:22 +0000 UTC" firstStartedPulling="2026-02-27 17:53:23.013624485 +0000 UTC m=+5365.160638932" lastFinishedPulling="2026-02-27 17:53:30.289199257 +0000 UTC m=+5372.436213744" observedRunningTime="2026-02-27 17:53:31.151704598 +0000 UTC m=+5373.298719055" watchObservedRunningTime="2026-02-27 17:53:31.1617106 +0000 UTC m=+5373.308725057" Feb 27 17:53:45 crc kubenswrapper[4751]: I0227 17:53:45.521436 4751 scope.go:117] 
"RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:53:45 crc kubenswrapper[4751]: E0227 17:53:45.522486 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:53:52 crc kubenswrapper[4751]: I0227 17:53:52.838112 4751 scope.go:117] "RemoveContainer" containerID="7d3519e3f23106ad8a4119f3fd28686c8918f7ca6ab5db2724aa03674f071e31" Feb 27 17:53:59 crc kubenswrapper[4751]: I0227 17:53:59.520593 4751 scope.go:117] "RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:53:59 crc kubenswrapper[4751]: E0227 17:53:59.521701 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:54:00 crc kubenswrapper[4751]: I0227 17:54:00.146001 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536914-f48kv"] Feb 27 17:54:00 crc kubenswrapper[4751]: E0227 17:54:00.146863 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e2f6e96-00c1-4f5f-969b-c2dc1441a474" containerName="extract-content" Feb 27 17:54:00 crc kubenswrapper[4751]: I0227 17:54:00.146896 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e2f6e96-00c1-4f5f-969b-c2dc1441a474" containerName="extract-content" Feb 27 17:54:00 crc kubenswrapper[4751]: E0227 17:54:00.146943 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e2f6e96-00c1-4f5f-969b-c2dc1441a474" containerName="registry-server" Feb 27 17:54:00 crc kubenswrapper[4751]: I0227 17:54:00.146956 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e2f6e96-00c1-4f5f-969b-c2dc1441a474" containerName="registry-server" Feb 27 17:54:00 crc kubenswrapper[4751]: E0227 17:54:00.146985 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e2f6e96-00c1-4f5f-969b-c2dc1441a474" containerName="extract-utilities" Feb 27 17:54:00 crc kubenswrapper[4751]: I0227 17:54:00.146997 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e2f6e96-00c1-4f5f-969b-c2dc1441a474" containerName="extract-utilities" Feb 27 17:54:00 crc kubenswrapper[4751]: I0227 17:54:00.147226 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e2f6e96-00c1-4f5f-969b-c2dc1441a474" containerName="registry-server" Feb 27 17:54:00 crc kubenswrapper[4751]: I0227 17:54:00.147980 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536914-f48kv" Feb 27 17:54:00 crc kubenswrapper[4751]: I0227 17:54:00.152197 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:54:00 crc kubenswrapper[4751]: I0227 17:54:00.152266 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:54:00 crc kubenswrapper[4751]: I0227 17:54:00.152320 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:54:00 crc kubenswrapper[4751]: I0227 17:54:00.161153 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536914-f48kv"] Feb 27 17:54:00 crc kubenswrapper[4751]: I0227 17:54:00.246880 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6m2k\" (UniqueName: \"kubernetes.io/projected/be3371f7-acf3-453a-b564-bfd78f1b3225-kube-api-access-q6m2k\") pod \"auto-csr-approver-29536914-f48kv\" (UID: \"be3371f7-acf3-453a-b564-bfd78f1b3225\") " pod="openshift-infra/auto-csr-approver-29536914-f48kv" Feb 27 17:54:00 crc kubenswrapper[4751]: I0227 17:54:00.348500 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q6m2k\" (UniqueName: \"kubernetes.io/projected/be3371f7-acf3-453a-b564-bfd78f1b3225-kube-api-access-q6m2k\") pod \"auto-csr-approver-29536914-f48kv\" (UID: \"be3371f7-acf3-453a-b564-bfd78f1b3225\") " pod="openshift-infra/auto-csr-approver-29536914-f48kv" Feb 27 17:54:00 crc kubenswrapper[4751]: I0227 17:54:00.370735 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q6m2k\" (UniqueName: \"kubernetes.io/projected/be3371f7-acf3-453a-b564-bfd78f1b3225-kube-api-access-q6m2k\") pod \"auto-csr-approver-29536914-f48kv\" (UID: \"be3371f7-acf3-453a-b564-bfd78f1b3225\") " pod="openshift-infra/auto-csr-approver-29536914-f48kv" Feb 27 17:54:00 crc kubenswrapper[4751]: I0227 17:54:00.484131 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536914-f48kv" Feb 27 17:54:00 crc kubenswrapper[4751]: I0227 17:54:00.741087 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536914-f48kv"] Feb 27 17:54:01 crc kubenswrapper[4751]: I0227 17:54:01.369421 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536914-f48kv" event={"ID":"be3371f7-acf3-453a-b564-bfd78f1b3225","Type":"ContainerStarted","Data":"7ccc8218574ff86ac4c799f1480a7deaa05e7db221910d0ce0b228e540fd0b14"} Feb 27 17:54:02 crc kubenswrapper[4751]: I0227 17:54:02.379844 4751 generic.go:334] "Generic (PLEG): container finished" podID="be3371f7-acf3-453a-b564-bfd78f1b3225" containerID="bb343bcbe29c802aa0abe9e19851b234644c85fa0c305b4b861cd00d409ed9a5" exitCode=0 Feb 27 17:54:02 crc kubenswrapper[4751]: I0227 17:54:02.380025 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536914-f48kv" event={"ID":"be3371f7-acf3-453a-b564-bfd78f1b3225","Type":"ContainerDied","Data":"bb343bcbe29c802aa0abe9e19851b234644c85fa0c305b4b861cd00d409ed9a5"} Feb 27 17:54:03 crc kubenswrapper[4751]: I0227 17:54:03.749203 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536914-f48kv" Feb 27 17:54:03 crc kubenswrapper[4751]: I0227 17:54:03.910159 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q6m2k\" (UniqueName: \"kubernetes.io/projected/be3371f7-acf3-453a-b564-bfd78f1b3225-kube-api-access-q6m2k\") pod \"be3371f7-acf3-453a-b564-bfd78f1b3225\" (UID: \"be3371f7-acf3-453a-b564-bfd78f1b3225\") " Feb 27 17:54:03 crc kubenswrapper[4751]: I0227 17:54:03.915542 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be3371f7-acf3-453a-b564-bfd78f1b3225-kube-api-access-q6m2k" (OuterVolumeSpecName: "kube-api-access-q6m2k") pod "be3371f7-acf3-453a-b564-bfd78f1b3225" (UID: "be3371f7-acf3-453a-b564-bfd78f1b3225"). InnerVolumeSpecName "kube-api-access-q6m2k". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:54:04 crc kubenswrapper[4751]: I0227 17:54:04.012620 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q6m2k\" (UniqueName: \"kubernetes.io/projected/be3371f7-acf3-453a-b564-bfd78f1b3225-kube-api-access-q6m2k\") on node \"crc\" DevicePath \"\"" Feb 27 17:54:04 crc kubenswrapper[4751]: I0227 17:54:04.407426 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536914-f48kv" event={"ID":"be3371f7-acf3-453a-b564-bfd78f1b3225","Type":"ContainerDied","Data":"7ccc8218574ff86ac4c799f1480a7deaa05e7db221910d0ce0b228e540fd0b14"} Feb 27 17:54:04 crc kubenswrapper[4751]: I0227 17:54:04.407706 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7ccc8218574ff86ac4c799f1480a7deaa05e7db221910d0ce0b228e540fd0b14" Feb 27 17:54:04 crc kubenswrapper[4751]: I0227 17:54:04.407472 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536914-f48kv" Feb 27 17:54:04 crc kubenswrapper[4751]: I0227 17:54:04.822192 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536908-8tlz6"] Feb 27 17:54:04 crc kubenswrapper[4751]: I0227 17:54:04.828449 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536908-8tlz6"] Feb 27 17:54:06 crc kubenswrapper[4751]: I0227 17:54:06.531487 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d75ed18f-9cc7-4c8f-9856-130e8b5932f8" path="/var/lib/kubelet/pods/d75ed18f-9cc7-4c8f-9856-130e8b5932f8/volumes" Feb 27 17:54:12 crc kubenswrapper[4751]: I0227 17:54:12.522794 4751 scope.go:117] "RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:54:12 crc kubenswrapper[4751]: E0227 17:54:12.523420 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:54:23 crc kubenswrapper[4751]: I0227 17:54:23.427600 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5b7946d7b9-5n9cf_5e1a4b82-5d0d-40aa-98cf-8449a543be92/init/0.log" Feb 27 17:54:23 crc kubenswrapper[4751]: I0227 17:54:23.646552 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5b7946d7b9-5n9cf_5e1a4b82-5d0d-40aa-98cf-8449a543be92/init/0.log" Feb 27 17:54:23 crc kubenswrapper[4751]: I0227 17:54:23.670905 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5b7946d7b9-5n9cf_5e1a4b82-5d0d-40aa-98cf-8449a543be92/dnsmasq-dns/0.log" Feb 27 17:54:23 crc kubenswrapper[4751]: I0227 17:54:23.820637 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_f830ff03-d833-4cb2-96fa-19216f5df45a/memcached/0.log" Feb 27 17:54:23 crc kubenswrapper[4751]: I0227 17:54:23.898432 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_88052214-0232-41d1-979a-1d1b8a45d674/mysql-bootstrap/0.log" Feb 27 17:54:24 crc kubenswrapper[4751]: I0227 17:54:24.006746 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_88052214-0232-41d1-979a-1d1b8a45d674/mysql-bootstrap/0.log" Feb 27 17:54:24 crc kubenswrapper[4751]: I0227 17:54:24.050215 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_88052214-0232-41d1-979a-1d1b8a45d674/galera/0.log" Feb 27 17:54:24 crc kubenswrapper[4751]: I0227 17:54:24.103689 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_2a4b1b5e-d595-4ff3-a99a-e13198440ef3/mysql-bootstrap/0.log" Feb 27 17:54:24 crc kubenswrapper[4751]: I0227 17:54:24.248458 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_2a4b1b5e-d595-4ff3-a99a-e13198440ef3/mysql-bootstrap/0.log" Feb 27 17:54:24 crc kubenswrapper[4751]: I0227 17:54:24.279127 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_2a4b1b5e-d595-4ff3-a99a-e13198440ef3/galera/0.log" Feb 27 17:54:24 crc kubenswrapper[4751]: I0227 
17:54:24.328635 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_72e8aad9-a325-4eb4-87fa-6b326ceb9a26/setup-container/0.log" Feb 27 17:54:24 crc kubenswrapper[4751]: I0227 17:54:24.444634 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_72e8aad9-a325-4eb4-87fa-6b326ceb9a26/rabbitmq/0.log" Feb 27 17:54:24 crc kubenswrapper[4751]: I0227 17:54:24.453676 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_72e8aad9-a325-4eb4-87fa-6b326ceb9a26/setup-container/0.log" Feb 27 17:54:24 crc kubenswrapper[4751]: I0227 17:54:24.520484 4751 scope.go:117] "RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:54:24 crc kubenswrapper[4751]: E0227 17:54:24.520760 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:54:24 crc kubenswrapper[4751]: I0227 17:54:24.534168 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_1784ca9a-fc20-4ed5-b770-3f1ea06b7065/setup-container/0.log" Feb 27 17:54:24 crc kubenswrapper[4751]: I0227 17:54:24.651765 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_1784ca9a-fc20-4ed5-b770-3f1ea06b7065/setup-container/0.log" Feb 27 17:54:24 crc kubenswrapper[4751]: I0227 17:54:24.709728 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_1784ca9a-fc20-4ed5-b770-3f1ea06b7065/rabbitmq/0.log" Feb 27 17:54:24 crc kubenswrapper[4751]: I0227 17:54:24.765081 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_root-account-create-update-zxkc7_d766e975-f2e6-4733-aaea-d3725ec03ec2/mariadb-account-create-update/0.log" Feb 27 17:54:39 crc kubenswrapper[4751]: I0227 17:54:39.521457 4751 scope.go:117] "RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:54:39 crc kubenswrapper[4751]: E0227 17:54:39.522462 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:54:40 crc kubenswrapper[4751]: I0227 17:54:40.902825 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7_c7b68099-ffa0-4702-a816-f63c3ff1f53d/util/0.log" Feb 27 17:54:41 crc kubenswrapper[4751]: I0227 17:54:41.022985 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7_c7b68099-ffa0-4702-a816-f63c3ff1f53d/util/0.log" Feb 27 17:54:41 crc kubenswrapper[4751]: I0227 17:54:41.047384 4751 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7_c7b68099-ffa0-4702-a816-f63c3ff1f53d/pull/0.log" Feb 27 17:54:41 crc kubenswrapper[4751]: I0227 17:54:41.067236 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7_c7b68099-ffa0-4702-a816-f63c3ff1f53d/pull/0.log" Feb 27 17:54:41 crc kubenswrapper[4751]: I0227 17:54:41.238315 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7_c7b68099-ffa0-4702-a816-f63c3ff1f53d/pull/0.log" Feb 27 17:54:41 crc kubenswrapper[4751]: I0227 17:54:41.255623 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7_c7b68099-ffa0-4702-a816-f63c3ff1f53d/extract/0.log" Feb 27 17:54:41 crc kubenswrapper[4751]: I0227 17:54:41.262481 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_58ede26cfcb7a31936df73291b266706050c4d7231f0b5a0c638704287xxdm7_c7b68099-ffa0-4702-a816-f63c3ff1f53d/util/0.log" Feb 27 17:54:41 crc kubenswrapper[4751]: I0227 17:54:41.751451 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-5d87c9d997-t6cx4_105de0b5-2fbb-4c56-b286-6466e76e6db6/manager/0.log" Feb 27 17:54:42 crc kubenswrapper[4751]: I0227 17:54:42.079139 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-64db6967f8-qgwmb_094b19ab-3e9a-4a80-b5c6-177790fd63f0/manager/0.log" Feb 27 17:54:42 crc kubenswrapper[4751]: I0227 17:54:42.269568 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-cf99c678f-cft6c_648389a6-8f01-4a6e-916e-c3b567817015/manager/0.log" Feb 27 17:54:42 crc kubenswrapper[4751]: I0227 17:54:42.462139 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-78bc7f9bd9-vtnnn_9126808a-112b-45e2-82fc-9f71b9ac3545/manager/0.log" Feb 27 17:54:43 crc kubenswrapper[4751]: I0227 17:54:43.283800 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-55d77d7b5c-6nxxw_d31044d2-895d-4bb3-8af2-2cdb852fea06/manager/0.log" Feb 27 17:54:43 crc kubenswrapper[4751]: I0227 17:54:43.542801 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-545456dc4-9vtcw_d466fdb9-e7bd-4ea1-8e4d-0a260ba3f0a0/manager/0.log" Feb 27 17:54:43 crc kubenswrapper[4751]: I0227 17:54:43.681349 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-f7fcc58b9-74jk2_367c4281-7780-478a-ae73-263cf73aa15e/manager/0.log" Feb 27 17:54:43 crc kubenswrapper[4751]: I0227 17:54:43.810047 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7c789f89c6-8x8p7_eebbb996-efc8-4dd2-9840-da330af0ec75/manager/0.log" Feb 27 17:54:43 crc kubenswrapper[4751]: I0227 17:54:43.883564 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-67d996989d-wpjmf_4cdc0be8-19db-4cab-a32a-11848fab949d/manager/0.log" Feb 27 17:54:44 crc kubenswrapper[4751]: I0227 17:54:44.031496 4751 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-7b6bfb6475-jlzhk_26f8413e-34ed-4e45-8eec-e06ba73d1a8b/manager/0.log" Feb 27 17:54:44 crc kubenswrapper[4751]: I0227 17:54:44.194024 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-54688575f-gltv4_789bdce1-90fe-43ec-89a2-4f0669899b1d/manager/0.log" Feb 27 17:54:44 crc kubenswrapper[4751]: I0227 17:54:44.383115 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-74b6b5dc96-dqshb_eac3a3e7-7a56-4774-8a2f-2f6998e678c1/manager/0.log" Feb 27 17:54:44 crc kubenswrapper[4751]: I0227 17:54:44.390221 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-5d86c7ddb7-p4qmg_22dbe1eb-ede5-439e-b447-c79f5051a22d/manager/0.log" Feb 27 17:54:44 crc kubenswrapper[4751]: I0227 17:54:44.542339 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-7c6767dc9cx24vv_b4054232-d1c6-469a-ab62-3bc130b5535b/manager/0.log" Feb 27 17:54:44 crc kubenswrapper[4751]: I0227 17:54:44.888927 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-5db4b47666-dfzxh_8fb412a1-d193-470d-8437-fae88c40c731/operator/0.log" Feb 27 17:54:44 crc kubenswrapper[4751]: I0227 17:54:44.933499 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-m6wq4_6525b243-d114-45bf-ab4f-c859cdadee78/registry-server/0.log" Feb 27 17:54:45 crc kubenswrapper[4751]: I0227 17:54:45.138940 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-75684d597f-vzmw2_6e95d387-339b-4ee7-b244-a1d82cb9f14e/manager/0.log" Feb 27 17:54:45 crc kubenswrapper[4751]: I0227 17:54:45.341278 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-648564c9fc-qbhxg_4a8b3476-6579-4458-ac2b-ba9795eaa9eb/manager/0.log" Feb 27 17:54:45 crc kubenswrapper[4751]: I0227 17:54:45.348179 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-kv5gk_d570bae3-0595-480f-bebc-80d86a0618d3/operator/0.log" Feb 27 17:54:45 crc kubenswrapper[4751]: I0227 17:54:45.579476 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-9b9ff9f4d-4zzmq_a0ef8d16-b0cf-4cb0-8e47-b1c10a3a13d7/manager/0.log" Feb 27 17:54:45 crc kubenswrapper[4751]: I0227 17:54:45.726134 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-55b5ff4dbb-tcndr_91067468-8654-4bfd-b921-15679cf507c9/manager/0.log" Feb 27 17:54:45 crc kubenswrapper[4751]: I0227 17:54:45.768707 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5fdb694969-w8rl5_00579c6f-e25e-4b49-b43d-50547230a24d/manager/0.log" Feb 27 17:54:45 crc kubenswrapper[4751]: I0227 17:54:45.783308 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-789bbcd94f-grbwc_0b8c1cf8-d3b0-4220-bbc5-81ccf3830782/manager/0.log" Feb 27 17:54:45 crc kubenswrapper[4751]: I0227 17:54:45.933387 4751 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-bccc79885-6bqxl_5bba51b5-db12-42b8-80bd-c38ff3d7bfd4/manager/0.log" Feb 27 17:54:52 crc kubenswrapper[4751]: I0227 17:54:52.111357 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-6db6876945-d2xqb_69ae63f1-4df4-46d1-89b1-4e0c4f60d83f/manager/0.log" Feb 27 17:54:52 crc kubenswrapper[4751]: I0227 17:54:52.520366 4751 scope.go:117] "RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:54:52 crc kubenswrapper[4751]: E0227 17:54:52.520643 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:55:05 crc kubenswrapper[4751]: I0227 17:55:05.521650 4751 scope.go:117] "RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:55:05 crc kubenswrapper[4751]: E0227 17:55:05.522821 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:55:08 crc kubenswrapper[4751]: I0227 17:55:08.270653 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-rjqrq_4155e5dc-eb83-4d58-bb2b-554fcbda2e8c/control-plane-machine-set-operator/0.log" Feb 27 17:55:08 crc kubenswrapper[4751]: I0227 17:55:08.395931 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-7hctb_ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3/kube-rbac-proxy/0.log" Feb 27 17:55:08 crc kubenswrapper[4751]: I0227 17:55:08.469545 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-7hctb_ccc3d705-29c7-4c23-82cf-e8f4cbcdacb3/machine-api-operator/0.log" Feb 27 17:55:16 crc kubenswrapper[4751]: I0227 17:55:16.521256 4751 scope.go:117] "RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:55:16 crc kubenswrapper[4751]: E0227 17:55:16.522230 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:55:21 crc kubenswrapper[4751]: I0227 17:55:21.708151 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-545d4d4674-dk9z8_c4450555-cd8d-466f-80e0-6d957133213f/cert-manager-controller/0.log" Feb 27 17:55:21 crc kubenswrapper[4751]: I0227 17:55:21.882221 4751 log.go:25] "Finished parsing log file" 
path="/var/log/pods/cert-manager_cert-manager-cainjector-5545bd876-j4nqw_b89ea261-77e6-4d3d-92ac-d538d32ecea0/cert-manager-cainjector/0.log" Feb 27 17:55:21 crc kubenswrapper[4751]: I0227 17:55:21.996370 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-6888856db4-svfnj_fcae9fa3-883c-4cba-b6e4-752ba0d7ae2c/cert-manager-webhook/0.log" Feb 27 17:55:30 crc kubenswrapper[4751]: I0227 17:55:30.521019 4751 scope.go:117] "RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:55:30 crc kubenswrapper[4751]: E0227 17:55:30.522750 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:55:35 crc kubenswrapper[4751]: I0227 17:55:35.726686 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5dcbbd79cf-nzj7f_fd2468a9-6de3-40c7-b91a-b2f47b9737b7/nmstate-console-plugin/0.log" Feb 27 17:55:35 crc kubenswrapper[4751]: I0227 17:55:35.900688 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-6mj44_aac8927a-2c6c-40e7-9357-9899dd63927b/nmstate-handler/0.log" Feb 27 17:55:35 crc kubenswrapper[4751]: I0227 17:55:35.956782 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-69594cc75-qtlsh_b80ace26-6333-4a22-8c2c-74c9d023f1b7/kube-rbac-proxy/0.log" Feb 27 17:55:35 crc kubenswrapper[4751]: I0227 17:55:35.979358 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-69594cc75-qtlsh_b80ace26-6333-4a22-8c2c-74c9d023f1b7/nmstate-metrics/0.log" Feb 27 17:55:36 crc kubenswrapper[4751]: I0227 17:55:36.108800 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-75c5dccd6c-fxswb_cd7678bf-01a7-443e-9a9f-0c1297607112/nmstate-operator/0.log" Feb 27 17:55:36 crc kubenswrapper[4751]: I0227 17:55:36.149997 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-786f45cff4-mvl65_740f3d53-7dcd-4f19-8c7e-35ea882c433f/nmstate-webhook/0.log" Feb 27 17:55:44 crc kubenswrapper[4751]: I0227 17:55:44.520706 4751 scope.go:117] "RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:55:44 crc kubenswrapper[4751]: E0227 17:55:44.521636 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:55:52 crc kubenswrapper[4751]: I0227 17:55:52.916352 4751 scope.go:117] "RemoveContainer" containerID="12b13674f4fcd487f8ec225c4dc5f8935bd1f3e6516dd84e58bb8a5eec8d3b8d" Feb 27 17:55:53 crc kubenswrapper[4751]: I0227 17:55:53.007028 4751 scope.go:117] "RemoveContainer" containerID="0a2bdbcce9bd97aef4be3ddbc8bcdbb443bf0cd04528eeacf1e4292b7c0a3762" Feb 27 17:55:53 crc 
kubenswrapper[4751]: I0227 17:55:53.056356 4751 scope.go:117] "RemoveContainer" containerID="8f1cc5d095b1e53234cd149e63fc8caf74fdb0464ff2bd0562acf9d4880cc52e" Feb 27 17:55:59 crc kubenswrapper[4751]: I0227 17:55:59.521176 4751 scope.go:117] "RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:55:59 crc kubenswrapper[4751]: E0227 17:55:59.522218 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:56:00 crc kubenswrapper[4751]: I0227 17:56:00.151195 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536916-4wrfm"] Feb 27 17:56:00 crc kubenswrapper[4751]: E0227 17:56:00.152299 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be3371f7-acf3-453a-b564-bfd78f1b3225" containerName="oc" Feb 27 17:56:00 crc kubenswrapper[4751]: I0227 17:56:00.152335 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="be3371f7-acf3-453a-b564-bfd78f1b3225" containerName="oc" Feb 27 17:56:00 crc kubenswrapper[4751]: I0227 17:56:00.152656 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="be3371f7-acf3-453a-b564-bfd78f1b3225" containerName="oc" Feb 27 17:56:00 crc kubenswrapper[4751]: I0227 17:56:00.153614 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536916-4wrfm" Feb 27 17:56:00 crc kubenswrapper[4751]: I0227 17:56:00.156389 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:56:00 crc kubenswrapper[4751]: I0227 17:56:00.157507 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:56:00 crc kubenswrapper[4751]: I0227 17:56:00.160070 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536916-4wrfm"] Feb 27 17:56:00 crc kubenswrapper[4751]: I0227 17:56:00.160971 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:56:00 crc kubenswrapper[4751]: I0227 17:56:00.318740 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6dv9\" (UniqueName: \"kubernetes.io/projected/d6db4b30-a183-4074-87d5-5d3e53c3ad90-kube-api-access-z6dv9\") pod \"auto-csr-approver-29536916-4wrfm\" (UID: \"d6db4b30-a183-4074-87d5-5d3e53c3ad90\") " pod="openshift-infra/auto-csr-approver-29536916-4wrfm" Feb 27 17:56:00 crc kubenswrapper[4751]: I0227 17:56:00.420191 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6dv9\" (UniqueName: \"kubernetes.io/projected/d6db4b30-a183-4074-87d5-5d3e53c3ad90-kube-api-access-z6dv9\") pod \"auto-csr-approver-29536916-4wrfm\" (UID: \"d6db4b30-a183-4074-87d5-5d3e53c3ad90\") " pod="openshift-infra/auto-csr-approver-29536916-4wrfm" Feb 27 17:56:00 crc kubenswrapper[4751]: I0227 17:56:00.450577 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6dv9\" (UniqueName: 
\"kubernetes.io/projected/d6db4b30-a183-4074-87d5-5d3e53c3ad90-kube-api-access-z6dv9\") pod \"auto-csr-approver-29536916-4wrfm\" (UID: \"d6db4b30-a183-4074-87d5-5d3e53c3ad90\") " pod="openshift-infra/auto-csr-approver-29536916-4wrfm" Feb 27 17:56:00 crc kubenswrapper[4751]: I0227 17:56:00.481579 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536916-4wrfm" Feb 27 17:56:00 crc kubenswrapper[4751]: I0227 17:56:00.948600 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536916-4wrfm"] Feb 27 17:56:01 crc kubenswrapper[4751]: I0227 17:56:01.343983 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536916-4wrfm" event={"ID":"d6db4b30-a183-4074-87d5-5d3e53c3ad90","Type":"ContainerStarted","Data":"38f2f0dd7f7dc741394ff28ad00f27e0758e016c4a9e439f0364eeaccca0a596"} Feb 27 17:56:01 crc kubenswrapper[4751]: E0227 17:56:01.795134 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)" image="registry.redhat.io/openshift4/ose-cli:latest" Feb 27 17:56:01 crc kubenswrapper[4751]: E0227 17:56:01.795264 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 17:56:01 crc kubenswrapper[4751]: container &Container{Name:oc,Image:registry.redhat.io/openshift4/ose-cli:latest,Command:[/bin/bash -c oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve Feb 27 17:56:01 crc kubenswrapper[4751]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-z6dv9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod auto-csr-approver-29536916-4wrfm_openshift-infra(d6db4b30-a183-4074-87d5-5d3e53c3ad90): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error) Feb 27 17:56:01 crc kubenswrapper[4751]: > logger="UnhandledError" Feb 27 17:56:01 crc kubenswrapper[4751]: E0227 17:56:01.796599 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)\"" pod="openshift-infra/auto-csr-approver-29536916-4wrfm" 
podUID="d6db4b30-a183-4074-87d5-5d3e53c3ad90" Feb 27 17:56:02 crc kubenswrapper[4751]: E0227 17:56:02.353820 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536916-4wrfm" podUID="d6db4b30-a183-4074-87d5-5d3e53c3ad90" Feb 27 17:56:05 crc kubenswrapper[4751]: I0227 17:56:05.552420 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-86ddb6bd46-z2rn9_d5d4586f-9e6d-471f-a07c-96e142df13ec/kube-rbac-proxy/0.log" Feb 27 17:56:05 crc kubenswrapper[4751]: I0227 17:56:05.741453 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nqkcw_51c1c1cc-8e81-4198-b580-3d511ed64669/cp-frr-files/0.log" Feb 27 17:56:05 crc kubenswrapper[4751]: I0227 17:56:05.933049 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nqkcw_51c1c1cc-8e81-4198-b580-3d511ed64669/cp-reloader/0.log" Feb 27 17:56:05 crc kubenswrapper[4751]: I0227 17:56:05.955523 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nqkcw_51c1c1cc-8e81-4198-b580-3d511ed64669/cp-frr-files/0.log" Feb 27 17:56:05 crc kubenswrapper[4751]: I0227 17:56:05.972764 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nqkcw_51c1c1cc-8e81-4198-b580-3d511ed64669/cp-metrics/0.log" Feb 27 17:56:05 crc kubenswrapper[4751]: I0227 17:56:05.976751 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-86ddb6bd46-z2rn9_d5d4586f-9e6d-471f-a07c-96e142df13ec/controller/0.log" Feb 27 17:56:06 crc kubenswrapper[4751]: I0227 17:56:06.090608 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nqkcw_51c1c1cc-8e81-4198-b580-3d511ed64669/cp-reloader/0.log" Feb 27 17:56:06 crc kubenswrapper[4751]: I0227 17:56:06.236963 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nqkcw_51c1c1cc-8e81-4198-b580-3d511ed64669/cp-frr-files/0.log" Feb 27 17:56:06 crc kubenswrapper[4751]: I0227 17:56:06.280801 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nqkcw_51c1c1cc-8e81-4198-b580-3d511ed64669/cp-reloader/0.log" Feb 27 17:56:06 crc kubenswrapper[4751]: I0227 17:56:06.316625 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nqkcw_51c1c1cc-8e81-4198-b580-3d511ed64669/cp-metrics/0.log" Feb 27 17:56:06 crc kubenswrapper[4751]: I0227 17:56:06.349686 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nqkcw_51c1c1cc-8e81-4198-b580-3d511ed64669/cp-metrics/0.log" Feb 27 17:56:06 crc kubenswrapper[4751]: I0227 17:56:06.476945 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nqkcw_51c1c1cc-8e81-4198-b580-3d511ed64669/cp-metrics/0.log" Feb 27 17:56:06 crc kubenswrapper[4751]: I0227 17:56:06.483293 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nqkcw_51c1c1cc-8e81-4198-b580-3d511ed64669/cp-frr-files/0.log" Feb 27 17:56:06 crc kubenswrapper[4751]: I0227 17:56:06.494118 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nqkcw_51c1c1cc-8e81-4198-b580-3d511ed64669/cp-reloader/0.log" Feb 27 17:56:06 crc kubenswrapper[4751]: I0227 17:56:06.523987 4751 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-nqkcw_51c1c1cc-8e81-4198-b580-3d511ed64669/controller/0.log" Feb 27 17:56:06 crc kubenswrapper[4751]: I0227 17:56:06.629580 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nqkcw_51c1c1cc-8e81-4198-b580-3d511ed64669/frr-metrics/0.log" Feb 27 17:56:06 crc kubenswrapper[4751]: I0227 17:56:06.696825 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nqkcw_51c1c1cc-8e81-4198-b580-3d511ed64669/kube-rbac-proxy/0.log" Feb 27 17:56:06 crc kubenswrapper[4751]: I0227 17:56:06.720483 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nqkcw_51c1c1cc-8e81-4198-b580-3d511ed64669/kube-rbac-proxy-frr/0.log" Feb 27 17:56:06 crc kubenswrapper[4751]: I0227 17:56:06.835532 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nqkcw_51c1c1cc-8e81-4198-b580-3d511ed64669/reloader/0.log" Feb 27 17:56:06 crc kubenswrapper[4751]: I0227 17:56:06.967457 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7f989f654f-pr9vm_5e236cf5-35f7-4af5-8b59-1ca8b8dde5d7/frr-k8s-webhook-server/0.log" Feb 27 17:56:07 crc kubenswrapper[4751]: I0227 17:56:07.079150 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-b447877db-svmqs_f91806a5-f679-477e-b1fe-9f35cbfda94d/manager/0.log" Feb 27 17:56:07 crc kubenswrapper[4751]: I0227 17:56:07.231188 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-58f8d77f4f-vvpvl_ac5ec3ef-d5a4-4a18-839b-820031c0c971/webhook-server/0.log" Feb 27 17:56:07 crc kubenswrapper[4751]: I0227 17:56:07.326785 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-jr2bv_620b652a-8111-4c0a-86b0-d692bb768d8b/kube-rbac-proxy/0.log" Feb 27 17:56:07 crc kubenswrapper[4751]: I0227 17:56:07.940975 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-jr2bv_620b652a-8111-4c0a-86b0-d692bb768d8b/speaker/0.log" Feb 27 17:56:08 crc kubenswrapper[4751]: I0227 17:56:08.359971 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nqkcw_51c1c1cc-8e81-4198-b580-3d511ed64669/frr/0.log" Feb 27 17:56:14 crc kubenswrapper[4751]: I0227 17:56:14.524483 4751 scope.go:117] "RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:56:14 crc kubenswrapper[4751]: E0227 17:56:14.525513 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:56:15 crc kubenswrapper[4751]: I0227 17:56:15.455736 4751 generic.go:334] "Generic (PLEG): container finished" podID="d6db4b30-a183-4074-87d5-5d3e53c3ad90" containerID="18bca44042c49edda1bd894b5dd46c5b3263ad1abfc8a85321a277b17f7c71e7" exitCode=0 Feb 27 17:56:15 crc kubenswrapper[4751]: I0227 17:56:15.455836 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536916-4wrfm" 
event={"ID":"d6db4b30-a183-4074-87d5-5d3e53c3ad90","Type":"ContainerDied","Data":"18bca44042c49edda1bd894b5dd46c5b3263ad1abfc8a85321a277b17f7c71e7"} Feb 27 17:56:16 crc kubenswrapper[4751]: I0227 17:56:16.755879 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536916-4wrfm" Feb 27 17:56:16 crc kubenswrapper[4751]: I0227 17:56:16.813683 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z6dv9\" (UniqueName: \"kubernetes.io/projected/d6db4b30-a183-4074-87d5-5d3e53c3ad90-kube-api-access-z6dv9\") pod \"d6db4b30-a183-4074-87d5-5d3e53c3ad90\" (UID: \"d6db4b30-a183-4074-87d5-5d3e53c3ad90\") " Feb 27 17:56:16 crc kubenswrapper[4751]: I0227 17:56:16.821263 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6db4b30-a183-4074-87d5-5d3e53c3ad90-kube-api-access-z6dv9" (OuterVolumeSpecName: "kube-api-access-z6dv9") pod "d6db4b30-a183-4074-87d5-5d3e53c3ad90" (UID: "d6db4b30-a183-4074-87d5-5d3e53c3ad90"). InnerVolumeSpecName "kube-api-access-z6dv9". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:56:16 crc kubenswrapper[4751]: I0227 17:56:16.915757 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z6dv9\" (UniqueName: \"kubernetes.io/projected/d6db4b30-a183-4074-87d5-5d3e53c3ad90-kube-api-access-z6dv9\") on node \"crc\" DevicePath \"\"" Feb 27 17:56:17 crc kubenswrapper[4751]: I0227 17:56:17.475482 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536916-4wrfm" event={"ID":"d6db4b30-a183-4074-87d5-5d3e53c3ad90","Type":"ContainerDied","Data":"38f2f0dd7f7dc741394ff28ad00f27e0758e016c4a9e439f0364eeaccca0a596"} Feb 27 17:56:17 crc kubenswrapper[4751]: I0227 17:56:17.475853 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="38f2f0dd7f7dc741394ff28ad00f27e0758e016c4a9e439f0364eeaccca0a596" Feb 27 17:56:17 crc kubenswrapper[4751]: I0227 17:56:17.475547 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536916-4wrfm" Feb 27 17:56:17 crc kubenswrapper[4751]: I0227 17:56:17.862500 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536910-n5xg2"] Feb 27 17:56:17 crc kubenswrapper[4751]: I0227 17:56:17.869574 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536910-n5xg2"] Feb 27 17:56:18 crc kubenswrapper[4751]: I0227 17:56:18.550348 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8e60c4e-c1a0-4310-8275-865935c980a6" path="/var/lib/kubelet/pods/d8e60c4e-c1a0-4310-8275-865935c980a6/volumes" Feb 27 17:56:21 crc kubenswrapper[4751]: I0227 17:56:21.784738 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7_b5355a8f-edd7-4c96-94f6-0f2904459f73/util/0.log" Feb 27 17:56:22 crc kubenswrapper[4751]: I0227 17:56:22.034427 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7_b5355a8f-edd7-4c96-94f6-0f2904459f73/pull/0.log" Feb 27 17:56:22 crc kubenswrapper[4751]: I0227 17:56:22.039386 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7_b5355a8f-edd7-4c96-94f6-0f2904459f73/pull/0.log" Feb 27 17:56:22 crc kubenswrapper[4751]: I0227 17:56:22.044969 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7_b5355a8f-edd7-4c96-94f6-0f2904459f73/util/0.log" Feb 27 17:56:22 crc kubenswrapper[4751]: I0227 17:56:22.192816 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7_b5355a8f-edd7-4c96-94f6-0f2904459f73/util/0.log" Feb 27 17:56:22 crc kubenswrapper[4751]: I0227 17:56:22.199897 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7_b5355a8f-edd7-4c96-94f6-0f2904459f73/pull/0.log" Feb 27 17:56:22 crc kubenswrapper[4751]: I0227 17:56:22.203688 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_0e94e7566f739476ccec6d16e58de3f1c434cfa3060893f90f3e473a82br2v7_b5355a8f-edd7-4c96-94f6-0f2904459f73/extract/0.log" Feb 27 17:56:22 crc kubenswrapper[4751]: I0227 17:56:22.354159 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm_b9d9ee08-5a13-4f98-916d-b9b330f8963c/util/0.log" Feb 27 17:56:22 crc kubenswrapper[4751]: I0227 17:56:22.532227 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm_b9d9ee08-5a13-4f98-916d-b9b330f8963c/pull/0.log" Feb 27 17:56:22 crc kubenswrapper[4751]: I0227 17:56:22.544153 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm_b9d9ee08-5a13-4f98-916d-b9b330f8963c/util/0.log" Feb 27 17:56:22 crc kubenswrapper[4751]: I0227 17:56:22.588644 4751 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm_b9d9ee08-5a13-4f98-916d-b9b330f8963c/pull/0.log" Feb 27 17:56:22 crc kubenswrapper[4751]: I0227 17:56:22.676857 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm_b9d9ee08-5a13-4f98-916d-b9b330f8963c/util/0.log" Feb 27 17:56:22 crc kubenswrapper[4751]: I0227 17:56:22.723088 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm_b9d9ee08-5a13-4f98-916d-b9b330f8963c/extract/0.log" Feb 27 17:56:22 crc kubenswrapper[4751]: I0227 17:56:22.756296 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5kmsxm_b9d9ee08-5a13-4f98-916d-b9b330f8963c/pull/0.log" Feb 27 17:56:22 crc kubenswrapper[4751]: I0227 17:56:22.857638 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-p22qp_d537dc1a-e8f2-4288-8791-dc83e923cd75/extract-utilities/0.log" Feb 27 17:56:23 crc kubenswrapper[4751]: I0227 17:56:23.016924 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-p22qp_d537dc1a-e8f2-4288-8791-dc83e923cd75/extract-utilities/0.log" Feb 27 17:56:23 crc kubenswrapper[4751]: I0227 17:56:23.024808 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-p22qp_d537dc1a-e8f2-4288-8791-dc83e923cd75/extract-content/0.log" Feb 27 17:56:23 crc kubenswrapper[4751]: I0227 17:56:23.027072 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-p22qp_d537dc1a-e8f2-4288-8791-dc83e923cd75/extract-content/0.log" Feb 27 17:56:23 crc kubenswrapper[4751]: I0227 17:56:23.193395 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-p22qp_d537dc1a-e8f2-4288-8791-dc83e923cd75/extract-utilities/0.log" Feb 27 17:56:23 crc kubenswrapper[4751]: I0227 17:56:23.215352 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-p22qp_d537dc1a-e8f2-4288-8791-dc83e923cd75/extract-content/0.log" Feb 27 17:56:23 crc kubenswrapper[4751]: I0227 17:56:23.413712 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-pdccw_9d9a23e1-4ca5-4648-b249-303e9e41a14c/extract-utilities/0.log" Feb 27 17:56:23 crc kubenswrapper[4751]: I0227 17:56:23.502837 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-p22qp_d537dc1a-e8f2-4288-8791-dc83e923cd75/registry-server/0.log" Feb 27 17:56:23 crc kubenswrapper[4751]: I0227 17:56:23.663850 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-pdccw_9d9a23e1-4ca5-4648-b249-303e9e41a14c/extract-content/0.log" Feb 27 17:56:23 crc kubenswrapper[4751]: I0227 17:56:23.677548 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-pdccw_9d9a23e1-4ca5-4648-b249-303e9e41a14c/extract-utilities/0.log" Feb 27 17:56:23 crc kubenswrapper[4751]: I0227 17:56:23.684621 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-pdccw_9d9a23e1-4ca5-4648-b249-303e9e41a14c/extract-content/0.log" Feb 27 17:56:23 
crc kubenswrapper[4751]: I0227 17:56:23.879887 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-pdccw_9d9a23e1-4ca5-4648-b249-303e9e41a14c/extract-content/0.log" Feb 27 17:56:23 crc kubenswrapper[4751]: I0227 17:56:23.883521 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-pdccw_9d9a23e1-4ca5-4648-b249-303e9e41a14c/extract-utilities/0.log" Feb 27 17:56:24 crc kubenswrapper[4751]: I0227 17:56:24.101837 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7_1a21af89-bee9-466f-af38-f9b30329134e/util/0.log" Feb 27 17:56:24 crc kubenswrapper[4751]: I0227 17:56:24.258326 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7_1a21af89-bee9-466f-af38-f9b30329134e/util/0.log" Feb 27 17:56:24 crc kubenswrapper[4751]: I0227 17:56:24.320752 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7_1a21af89-bee9-466f-af38-f9b30329134e/pull/0.log" Feb 27 17:56:24 crc kubenswrapper[4751]: I0227 17:56:24.345197 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7_1a21af89-bee9-466f-af38-f9b30329134e/pull/0.log" Feb 27 17:56:24 crc kubenswrapper[4751]: I0227 17:56:24.518793 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7_1a21af89-bee9-466f-af38-f9b30329134e/util/0.log" Feb 27 17:56:24 crc kubenswrapper[4751]: I0227 17:56:24.548584 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7_1a21af89-bee9-466f-af38-f9b30329134e/pull/0.log" Feb 27 17:56:24 crc kubenswrapper[4751]: I0227 17:56:24.569171 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_d146760600e43041070ad4572d9c23f31a62e3aefc01a54998863bc5f47sfd7_1a21af89-bee9-466f-af38-f9b30329134e/extract/0.log" Feb 27 17:56:24 crc kubenswrapper[4751]: I0227 17:56:24.576911 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-pdccw_9d9a23e1-4ca5-4648-b249-303e9e41a14c/registry-server/0.log" Feb 27 17:56:24 crc kubenswrapper[4751]: I0227 17:56:24.707132 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-kbkfj_c88a4f09-3810-4b81-9a96-7158892ac367/marketplace-operator/0.log" Feb 27 17:56:24 crc kubenswrapper[4751]: I0227 17:56:24.760670 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-9p4vl_031d7edf-76d2-4e3c-9985-0d9759f9c8d6/extract-utilities/0.log" Feb 27 17:56:24 crc kubenswrapper[4751]: I0227 17:56:24.916577 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-9p4vl_031d7edf-76d2-4e3c-9985-0d9759f9c8d6/extract-content/0.log" Feb 27 17:56:24 crc kubenswrapper[4751]: I0227 17:56:24.963309 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-9p4vl_031d7edf-76d2-4e3c-9985-0d9759f9c8d6/extract-utilities/0.log" Feb 27 17:56:24 crc kubenswrapper[4751]: I0227 17:56:24.966729 4751 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-9p4vl_031d7edf-76d2-4e3c-9985-0d9759f9c8d6/extract-content/0.log" Feb 27 17:56:25 crc kubenswrapper[4751]: I0227 17:56:25.083135 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-9p4vl_031d7edf-76d2-4e3c-9985-0d9759f9c8d6/extract-utilities/0.log" Feb 27 17:56:25 crc kubenswrapper[4751]: I0227 17:56:25.113757 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-9p4vl_031d7edf-76d2-4e3c-9985-0d9759f9c8d6/extract-content/0.log" Feb 27 17:56:25 crc kubenswrapper[4751]: I0227 17:56:25.318180 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-j626z_5195d38b-7810-4277-b020-5a89a9189dc0/extract-utilities/0.log" Feb 27 17:56:25 crc kubenswrapper[4751]: I0227 17:56:25.333886 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-9p4vl_031d7edf-76d2-4e3c-9985-0d9759f9c8d6/registry-server/0.log" Feb 27 17:56:25 crc kubenswrapper[4751]: I0227 17:56:25.459271 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-j626z_5195d38b-7810-4277-b020-5a89a9189dc0/extract-content/0.log" Feb 27 17:56:25 crc kubenswrapper[4751]: I0227 17:56:25.469200 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-j626z_5195d38b-7810-4277-b020-5a89a9189dc0/extract-utilities/0.log" Feb 27 17:56:25 crc kubenswrapper[4751]: I0227 17:56:25.489998 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-j626z_5195d38b-7810-4277-b020-5a89a9189dc0/extract-content/0.log" Feb 27 17:56:25 crc kubenswrapper[4751]: I0227 17:56:25.607878 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-j626z_5195d38b-7810-4277-b020-5a89a9189dc0/extract-utilities/0.log" Feb 27 17:56:25 crc kubenswrapper[4751]: I0227 17:56:25.633104 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-j626z_5195d38b-7810-4277-b020-5a89a9189dc0/extract-content/0.log" Feb 27 17:56:26 crc kubenswrapper[4751]: I0227 17:56:26.278141 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-j626z_5195d38b-7810-4277-b020-5a89a9189dc0/registry-server/0.log" Feb 27 17:56:28 crc kubenswrapper[4751]: I0227 17:56:28.528200 4751 scope.go:117] "RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:56:28 crc kubenswrapper[4751]: E0227 17:56:28.528884 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:56:41 crc kubenswrapper[4751]: I0227 17:56:41.521477 4751 scope.go:117] "RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:56:41 crc kubenswrapper[4751]: E0227 17:56:41.522469 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:56:53 crc kubenswrapper[4751]: I0227 17:56:53.123077 4751 scope.go:117] "RemoveContainer" containerID="4056bc4f102d5da178a8e0031251e7fea1461aa51dca8d2a44f74bc7b2f9c8be" Feb 27 17:56:53 crc kubenswrapper[4751]: I0227 17:56:53.521983 4751 scope.go:117] "RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:56:53 crc kubenswrapper[4751]: E0227 17:56:53.522688 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:57:04 crc kubenswrapper[4751]: I0227 17:57:04.521327 4751 scope.go:117] "RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:57:04 crc kubenswrapper[4751]: E0227 17:57:04.522309 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:57:17 crc kubenswrapper[4751]: I0227 17:57:17.521062 4751 scope.go:117] "RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:57:17 crc kubenswrapper[4751]: E0227 17:57:17.522122 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:57:23 crc kubenswrapper[4751]: I0227 17:57:23.075172 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-zxkc7"] Feb 27 17:57:23 crc kubenswrapper[4751]: I0227 17:57:23.091135 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-zxkc7"] Feb 27 17:57:24 crc kubenswrapper[4751]: I0227 17:57:24.538336 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d766e975-f2e6-4733-aaea-d3725ec03ec2" path="/var/lib/kubelet/pods/d766e975-f2e6-4733-aaea-d3725ec03ec2/volumes" Feb 27 17:57:32 crc kubenswrapper[4751]: I0227 17:57:32.521381 4751 scope.go:117] "RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:57:32 crc kubenswrapper[4751]: E0227 17:57:32.524243 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:57:43 crc kubenswrapper[4751]: I0227 17:57:43.520750 4751 scope.go:117] "RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:57:43 crc kubenswrapper[4751]: E0227 17:57:43.521744 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:57:48 crc kubenswrapper[4751]: I0227 17:57:48.257638 4751 generic.go:334] "Generic (PLEG): container finished" podID="8a6e75a1-1d82-49e3-8c4f-1b836cf5c533" containerID="41abf478f145aa71ef6354f31c2c4d743c04fd5a756e90b6df776440ae11d531" exitCode=0 Feb 27 17:57:48 crc kubenswrapper[4751]: I0227 17:57:48.257779 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-cwkkv/must-gather-c54qh" event={"ID":"8a6e75a1-1d82-49e3-8c4f-1b836cf5c533","Type":"ContainerDied","Data":"41abf478f145aa71ef6354f31c2c4d743c04fd5a756e90b6df776440ae11d531"} Feb 27 17:57:48 crc kubenswrapper[4751]: I0227 17:57:48.259186 4751 scope.go:117] "RemoveContainer" containerID="41abf478f145aa71ef6354f31c2c4d743c04fd5a756e90b6df776440ae11d531" Feb 27 17:57:48 crc kubenswrapper[4751]: I0227 17:57:48.801341 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-cwkkv_must-gather-c54qh_8a6e75a1-1d82-49e3-8c4f-1b836cf5c533/gather/0.log" Feb 27 17:57:53 crc kubenswrapper[4751]: I0227 17:57:53.232048 4751 scope.go:117] "RemoveContainer" containerID="47768b7f4071f5f4ba301d2c2afc27d314d158aa4007096a04ca21761cbb9d09" Feb 27 17:57:56 crc kubenswrapper[4751]: I0227 17:57:56.138512 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-cwkkv/must-gather-c54qh"] Feb 27 17:57:56 crc kubenswrapper[4751]: I0227 17:57:56.139060 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-cwkkv/must-gather-c54qh" podUID="8a6e75a1-1d82-49e3-8c4f-1b836cf5c533" containerName="copy" containerID="cri-o://dc968966fe9e198e27d3d584d5f63107477eff28bbee525a56c0baaee97d87da" gracePeriod=2 Feb 27 17:57:56 crc kubenswrapper[4751]: I0227 17:57:56.154520 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-cwkkv/must-gather-c54qh"] Feb 27 17:57:56 crc kubenswrapper[4751]: I0227 17:57:56.336452 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-cwkkv_must-gather-c54qh_8a6e75a1-1d82-49e3-8c4f-1b836cf5c533/copy/0.log" Feb 27 17:57:56 crc kubenswrapper[4751]: I0227 17:57:56.337248 4751 generic.go:334] "Generic (PLEG): container finished" podID="8a6e75a1-1d82-49e3-8c4f-1b836cf5c533" containerID="dc968966fe9e198e27d3d584d5f63107477eff28bbee525a56c0baaee97d87da" exitCode=143 Feb 27 17:57:56 crc kubenswrapper[4751]: I0227 17:57:56.521044 4751 scope.go:117] "RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:57:56 crc kubenswrapper[4751]: E0227 17:57:56.521467 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:57:56 crc kubenswrapper[4751]: I0227 17:57:56.597762 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-cwkkv_must-gather-c54qh_8a6e75a1-1d82-49e3-8c4f-1b836cf5c533/copy/0.log" Feb 27 17:57:56 crc kubenswrapper[4751]: I0227 17:57:56.598187 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-cwkkv/must-gather-c54qh" Feb 27 17:57:56 crc kubenswrapper[4751]: I0227 17:57:56.717982 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q8rgb\" (UniqueName: \"kubernetes.io/projected/8a6e75a1-1d82-49e3-8c4f-1b836cf5c533-kube-api-access-q8rgb\") pod \"8a6e75a1-1d82-49e3-8c4f-1b836cf5c533\" (UID: \"8a6e75a1-1d82-49e3-8c4f-1b836cf5c533\") " Feb 27 17:57:56 crc kubenswrapper[4751]: I0227 17:57:56.718055 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/8a6e75a1-1d82-49e3-8c4f-1b836cf5c533-must-gather-output\") pod \"8a6e75a1-1d82-49e3-8c4f-1b836cf5c533\" (UID: \"8a6e75a1-1d82-49e3-8c4f-1b836cf5c533\") " Feb 27 17:57:56 crc kubenswrapper[4751]: I0227 17:57:56.726765 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a6e75a1-1d82-49e3-8c4f-1b836cf5c533-kube-api-access-q8rgb" (OuterVolumeSpecName: "kube-api-access-q8rgb") pod "8a6e75a1-1d82-49e3-8c4f-1b836cf5c533" (UID: "8a6e75a1-1d82-49e3-8c4f-1b836cf5c533"). InnerVolumeSpecName "kube-api-access-q8rgb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:57:56 crc kubenswrapper[4751]: I0227 17:57:56.820116 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q8rgb\" (UniqueName: \"kubernetes.io/projected/8a6e75a1-1d82-49e3-8c4f-1b836cf5c533-kube-api-access-q8rgb\") on node \"crc\" DevicePath \"\"" Feb 27 17:57:56 crc kubenswrapper[4751]: I0227 17:57:56.846164 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a6e75a1-1d82-49e3-8c4f-1b836cf5c533-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "8a6e75a1-1d82-49e3-8c4f-1b836cf5c533" (UID: "8a6e75a1-1d82-49e3-8c4f-1b836cf5c533"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:57:56 crc kubenswrapper[4751]: I0227 17:57:56.922481 4751 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/8a6e75a1-1d82-49e3-8c4f-1b836cf5c533-must-gather-output\") on node \"crc\" DevicePath \"\"" Feb 27 17:57:57 crc kubenswrapper[4751]: I0227 17:57:57.345217 4751 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-cwkkv_must-gather-c54qh_8a6e75a1-1d82-49e3-8c4f-1b836cf5c533/copy/0.log" Feb 27 17:57:57 crc kubenswrapper[4751]: I0227 17:57:57.345662 4751 scope.go:117] "RemoveContainer" containerID="dc968966fe9e198e27d3d584d5f63107477eff28bbee525a56c0baaee97d87da" Feb 27 17:57:57 crc kubenswrapper[4751]: I0227 17:57:57.345707 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-cwkkv/must-gather-c54qh" Feb 27 17:57:57 crc kubenswrapper[4751]: I0227 17:57:57.362108 4751 scope.go:117] "RemoveContainer" containerID="41abf478f145aa71ef6354f31c2c4d743c04fd5a756e90b6df776440ae11d531" Feb 27 17:57:58 crc kubenswrapper[4751]: I0227 17:57:58.532545 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a6e75a1-1d82-49e3-8c4f-1b836cf5c533" path="/var/lib/kubelet/pods/8a6e75a1-1d82-49e3-8c4f-1b836cf5c533/volumes" Feb 27 17:58:00 crc kubenswrapper[4751]: I0227 17:58:00.163282 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536918-lbbck"] Feb 27 17:58:00 crc kubenswrapper[4751]: E0227 17:58:00.163819 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a6e75a1-1d82-49e3-8c4f-1b836cf5c533" containerName="gather" Feb 27 17:58:00 crc kubenswrapper[4751]: I0227 17:58:00.163832 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a6e75a1-1d82-49e3-8c4f-1b836cf5c533" containerName="gather" Feb 27 17:58:00 crc kubenswrapper[4751]: E0227 17:58:00.163848 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a6e75a1-1d82-49e3-8c4f-1b836cf5c533" containerName="copy" Feb 27 17:58:00 crc kubenswrapper[4751]: I0227 17:58:00.163853 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a6e75a1-1d82-49e3-8c4f-1b836cf5c533" containerName="copy" Feb 27 17:58:00 crc kubenswrapper[4751]: E0227 17:58:00.163866 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6db4b30-a183-4074-87d5-5d3e53c3ad90" containerName="oc" Feb 27 17:58:00 crc kubenswrapper[4751]: I0227 17:58:00.163871 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6db4b30-a183-4074-87d5-5d3e53c3ad90" containerName="oc" Feb 27 17:58:00 crc kubenswrapper[4751]: I0227 17:58:00.163993 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a6e75a1-1d82-49e3-8c4f-1b836cf5c533" containerName="gather" Feb 27 17:58:00 crc kubenswrapper[4751]: I0227 17:58:00.164010 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a6e75a1-1d82-49e3-8c4f-1b836cf5c533" containerName="copy" Feb 27 17:58:00 crc kubenswrapper[4751]: I0227 17:58:00.164020 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6db4b30-a183-4074-87d5-5d3e53c3ad90" containerName="oc" Feb 27 17:58:00 crc kubenswrapper[4751]: I0227 17:58:00.164523 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536918-lbbck" Feb 27 17:58:00 crc kubenswrapper[4751]: I0227 17:58:00.166632 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"kube-root-ca.crt" Feb 27 17:58:00 crc kubenswrapper[4751]: I0227 17:58:00.166838 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-infra"/"csr-approver-sa-dockercfg-c2k26" Feb 27 17:58:00 crc kubenswrapper[4751]: I0227 17:58:00.169728 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-infra"/"openshift-service-ca.crt" Feb 27 17:58:00 crc kubenswrapper[4751]: I0227 17:58:00.179187 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536918-lbbck"] Feb 27 17:58:00 crc kubenswrapper[4751]: I0227 17:58:00.271286 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wrfb\" (UniqueName: \"kubernetes.io/projected/92977b18-9292-4958-a0b2-7348d83a24b5-kube-api-access-5wrfb\") pod \"auto-csr-approver-29536918-lbbck\" (UID: \"92977b18-9292-4958-a0b2-7348d83a24b5\") " pod="openshift-infra/auto-csr-approver-29536918-lbbck" Feb 27 17:58:00 crc kubenswrapper[4751]: I0227 17:58:00.372426 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wrfb\" (UniqueName: \"kubernetes.io/projected/92977b18-9292-4958-a0b2-7348d83a24b5-kube-api-access-5wrfb\") pod \"auto-csr-approver-29536918-lbbck\" (UID: \"92977b18-9292-4958-a0b2-7348d83a24b5\") " pod="openshift-infra/auto-csr-approver-29536918-lbbck" Feb 27 17:58:00 crc kubenswrapper[4751]: I0227 17:58:00.394154 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wrfb\" (UniqueName: \"kubernetes.io/projected/92977b18-9292-4958-a0b2-7348d83a24b5-kube-api-access-5wrfb\") pod \"auto-csr-approver-29536918-lbbck\" (UID: \"92977b18-9292-4958-a0b2-7348d83a24b5\") " pod="openshift-infra/auto-csr-approver-29536918-lbbck" Feb 27 17:58:00 crc kubenswrapper[4751]: I0227 17:58:00.478558 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536918-lbbck" Feb 27 17:58:00 crc kubenswrapper[4751]: I0227 17:58:00.932748 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536918-lbbck"] Feb 27 17:58:00 crc kubenswrapper[4751]: I0227 17:58:00.944300 4751 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 27 17:58:01 crc kubenswrapper[4751]: I0227 17:58:01.385643 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536918-lbbck" event={"ID":"92977b18-9292-4958-a0b2-7348d83a24b5","Type":"ContainerStarted","Data":"cd914ed5267e4087fd216814e8eeb28f3bb35c6f8ec271e748269ba4c91e6029"} Feb 27 17:58:01 crc kubenswrapper[4751]: E0227 17:58:01.989548 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)" image="registry.redhat.io/openshift4/ose-cli:latest" Feb 27 17:58:01 crc kubenswrapper[4751]: E0227 17:58:01.990075 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 17:58:01 crc kubenswrapper[4751]: container &Container{Name:oc,Image:registry.redhat.io/openshift4/ose-cli:latest,Command:[/bin/bash -c oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve Feb 27 17:58:01 crc kubenswrapper[4751]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5wrfb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod auto-csr-approver-29536918-lbbck_openshift-infra(92977b18-9292-4958-a0b2-7348d83a24b5): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error) Feb 27 17:58:01 crc kubenswrapper[4751]: > logger="UnhandledError" Feb 27 17:58:01 crc kubenswrapper[4751]: E0227 17:58:01.991679 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)\"" pod="openshift-infra/auto-csr-approver-29536918-lbbck" podUID="92977b18-9292-4958-a0b2-7348d83a24b5" Feb 27 17:58:02 crc kubenswrapper[4751]: E0227 17:58:02.397463 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536918-lbbck" podUID="92977b18-9292-4958-a0b2-7348d83a24b5" Feb 27 17:58:07 crc kubenswrapper[4751]: I0227 17:58:07.522458 4751 scope.go:117] "RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:58:07 crc kubenswrapper[4751]: E0227 17:58:07.523390 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:58:18 crc kubenswrapper[4751]: E0227 17:58:18.652968 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)" image="registry.redhat.io/openshift4/ose-cli:latest" Feb 27 17:58:18 crc kubenswrapper[4751]: E0227 17:58:18.653726 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 17:58:18 crc kubenswrapper[4751]: container &Container{Name:oc,Image:registry.redhat.io/openshift4/ose-cli:latest,Command:[/bin/bash -c oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve Feb 27 17:58:18 crc kubenswrapper[4751]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5wrfb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod auto-csr-approver-29536918-lbbck_openshift-infra(92977b18-9292-4958-a0b2-7348d83a24b5): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error) Feb 27 17:58:18 crc kubenswrapper[4751]: > logger="UnhandledError" Feb 27 17:58:18 crc kubenswrapper[4751]: E0227 17:58:18.655285 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)\"" pod="openshift-infra/auto-csr-approver-29536918-lbbck" 
podUID="92977b18-9292-4958-a0b2-7348d83a24b5" Feb 27 17:58:21 crc kubenswrapper[4751]: I0227 17:58:21.521542 4751 scope.go:117] "RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:58:21 crc kubenswrapper[4751]: E0227 17:58:21.522715 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rkcdq_openshift-machine-config-operator(d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f)\"" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" Feb 27 17:58:33 crc kubenswrapper[4751]: E0227 17:58:33.523485 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536918-lbbck" podUID="92977b18-9292-4958-a0b2-7348d83a24b5" Feb 27 17:58:34 crc kubenswrapper[4751]: I0227 17:58:34.521713 4751 scope.go:117] "RemoveContainer" containerID="0031311aae0a0515286c779f6322937f35c876d17a320caee36e5bd163511c0f" Feb 27 17:58:34 crc kubenswrapper[4751]: I0227 17:58:34.883738 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2krgj"] Feb 27 17:58:34 crc kubenswrapper[4751]: I0227 17:58:34.887246 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2krgj" Feb 27 17:58:34 crc kubenswrapper[4751]: I0227 17:58:34.900376 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2krgj"] Feb 27 17:58:35 crc kubenswrapper[4751]: I0227 17:58:35.002569 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0765e0f-9f1a-41d5-b345-bee0355a62a4-catalog-content\") pod \"redhat-marketplace-2krgj\" (UID: \"a0765e0f-9f1a-41d5-b345-bee0355a62a4\") " pod="openshift-marketplace/redhat-marketplace-2krgj" Feb 27 17:58:35 crc kubenswrapper[4751]: I0227 17:58:35.002773 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0765e0f-9f1a-41d5-b345-bee0355a62a4-utilities\") pod \"redhat-marketplace-2krgj\" (UID: \"a0765e0f-9f1a-41d5-b345-bee0355a62a4\") " pod="openshift-marketplace/redhat-marketplace-2krgj" Feb 27 17:58:35 crc kubenswrapper[4751]: I0227 17:58:35.002825 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5m9h\" (UniqueName: \"kubernetes.io/projected/a0765e0f-9f1a-41d5-b345-bee0355a62a4-kube-api-access-x5m9h\") pod \"redhat-marketplace-2krgj\" (UID: \"a0765e0f-9f1a-41d5-b345-bee0355a62a4\") " pod="openshift-marketplace/redhat-marketplace-2krgj" Feb 27 17:58:35 crc kubenswrapper[4751]: I0227 17:58:35.104131 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0765e0f-9f1a-41d5-b345-bee0355a62a4-utilities\") pod \"redhat-marketplace-2krgj\" (UID: \"a0765e0f-9f1a-41d5-b345-bee0355a62a4\") " pod="openshift-marketplace/redhat-marketplace-2krgj" Feb 27 17:58:35 crc kubenswrapper[4751]: I0227 17:58:35.104207 4751 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-x5m9h\" (UniqueName: \"kubernetes.io/projected/a0765e0f-9f1a-41d5-b345-bee0355a62a4-kube-api-access-x5m9h\") pod \"redhat-marketplace-2krgj\" (UID: \"a0765e0f-9f1a-41d5-b345-bee0355a62a4\") " pod="openshift-marketplace/redhat-marketplace-2krgj" Feb 27 17:58:35 crc kubenswrapper[4751]: I0227 17:58:35.104258 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0765e0f-9f1a-41d5-b345-bee0355a62a4-catalog-content\") pod \"redhat-marketplace-2krgj\" (UID: \"a0765e0f-9f1a-41d5-b345-bee0355a62a4\") " pod="openshift-marketplace/redhat-marketplace-2krgj" Feb 27 17:58:35 crc kubenswrapper[4751]: I0227 17:58:35.104930 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0765e0f-9f1a-41d5-b345-bee0355a62a4-utilities\") pod \"redhat-marketplace-2krgj\" (UID: \"a0765e0f-9f1a-41d5-b345-bee0355a62a4\") " pod="openshift-marketplace/redhat-marketplace-2krgj" Feb 27 17:58:35 crc kubenswrapper[4751]: I0227 17:58:35.105228 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0765e0f-9f1a-41d5-b345-bee0355a62a4-catalog-content\") pod \"redhat-marketplace-2krgj\" (UID: \"a0765e0f-9f1a-41d5-b345-bee0355a62a4\") " pod="openshift-marketplace/redhat-marketplace-2krgj" Feb 27 17:58:35 crc kubenswrapper[4751]: I0227 17:58:35.140156 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5m9h\" (UniqueName: \"kubernetes.io/projected/a0765e0f-9f1a-41d5-b345-bee0355a62a4-kube-api-access-x5m9h\") pod \"redhat-marketplace-2krgj\" (UID: \"a0765e0f-9f1a-41d5-b345-bee0355a62a4\") " pod="openshift-marketplace/redhat-marketplace-2krgj" Feb 27 17:58:35 crc kubenswrapper[4751]: I0227 17:58:35.218011 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2krgj" Feb 27 17:58:35 crc kubenswrapper[4751]: I0227 17:58:35.663046 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2krgj"] Feb 27 17:58:35 crc kubenswrapper[4751]: W0227 17:58:35.664437 4751 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda0765e0f_9f1a_41d5_b345_bee0355a62a4.slice/crio-407362322f67c8acefa7df2633b8d1588cbded378e80ed90a5ec799686de64c5 WatchSource:0}: Error finding container 407362322f67c8acefa7df2633b8d1588cbded378e80ed90a5ec799686de64c5: Status 404 returned error can't find the container with id 407362322f67c8acefa7df2633b8d1588cbded378e80ed90a5ec799686de64c5 Feb 27 17:58:35 crc kubenswrapper[4751]: I0227 17:58:35.706030 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" event={"ID":"d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f","Type":"ContainerStarted","Data":"d2fdf2747712835f4866bc5453d9d396415acc108b63cffe66ea43d77404fee8"} Feb 27 17:58:35 crc kubenswrapper[4751]: I0227 17:58:35.707907 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2krgj" event={"ID":"a0765e0f-9f1a-41d5-b345-bee0355a62a4","Type":"ContainerStarted","Data":"407362322f67c8acefa7df2633b8d1588cbded378e80ed90a5ec799686de64c5"} Feb 27 17:58:36 crc kubenswrapper[4751]: I0227 17:58:36.718859 4751 generic.go:334] "Generic (PLEG): container finished" podID="a0765e0f-9f1a-41d5-b345-bee0355a62a4" containerID="d4e086be48cac881b5dbe55bd161bf1d27892944bec7afd6229d6b28a035a271" exitCode=0 Feb 27 17:58:36 crc kubenswrapper[4751]: I0227 17:58:36.718976 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2krgj" event={"ID":"a0765e0f-9f1a-41d5-b345-bee0355a62a4","Type":"ContainerDied","Data":"d4e086be48cac881b5dbe55bd161bf1d27892944bec7afd6229d6b28a035a271"} Feb 27 17:58:37 crc kubenswrapper[4751]: E0227 17:58:37.278078 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/redhat-marketplace-index@sha256=e848a00af7690cfa41500b98e0e7a0b9738ce0af7b6b4fee3ea20e0838523c30/signature-2: status 500 (Internal Server Error)" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Feb 27 17:58:37 crc kubenswrapper[4751]: E0227 17:58:37.278685 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-x5m9h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-2krgj_openshift-marketplace(a0765e0f-9f1a-41d5-b345-bee0355a62a4): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/redhat-marketplace-index@sha256=e848a00af7690cfa41500b98e0e7a0b9738ce0af7b6b4fee3ea20e0838523c30/signature-2: status 500 (Internal Server Error)" logger="UnhandledError" Feb 27 17:58:37 crc kubenswrapper[4751]: E0227 17:58:37.279995 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/redhat-marketplace-index@sha256=e848a00af7690cfa41500b98e0e7a0b9738ce0af7b6b4fee3ea20e0838523c30/signature-2: status 500 (Internal Server Error)\"" pod="openshift-marketplace/redhat-marketplace-2krgj" podUID="a0765e0f-9f1a-41d5-b345-bee0355a62a4" Feb 27 17:58:37 crc kubenswrapper[4751]: E0227 17:58:37.727961 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-2krgj" podUID="a0765e0f-9f1a-41d5-b345-bee0355a62a4" Feb 27 17:58:48 crc kubenswrapper[4751]: E0227 17:58:48.386971 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)" image="registry.redhat.io/openshift4/ose-cli:latest" Feb 27 17:58:48 crc kubenswrapper[4751]: E0227 17:58:48.387982 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 17:58:48 crc kubenswrapper[4751]: container &Container{Name:oc,Image:registry.redhat.io/openshift4/ose-cli:latest,Command:[/bin/bash -c oc get csr -o go-template='{{range .items}}{{if not 
.status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve Feb 27 17:58:48 crc kubenswrapper[4751]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5wrfb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod auto-csr-approver-29536918-lbbck_openshift-infra(92977b18-9292-4958-a0b2-7348d83a24b5): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error) Feb 27 17:58:48 crc kubenswrapper[4751]: > logger="UnhandledError" Feb 27 17:58:48 crc kubenswrapper[4751]: E0227 17:58:48.389258 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)\"" pod="openshift-infra/auto-csr-approver-29536918-lbbck" podUID="92977b18-9292-4958-a0b2-7348d83a24b5" Feb 27 17:58:50 crc kubenswrapper[4751]: E0227 17:58:50.101243 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/redhat-marketplace-index@sha256=e848a00af7690cfa41500b98e0e7a0b9738ce0af7b6b4fee3ea20e0838523c30/signature-2: status 500 (Internal Server Error)" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Feb 27 17:58:50 crc kubenswrapper[4751]: E0227 17:58:50.101708 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-x5m9h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-2krgj_openshift-marketplace(a0765e0f-9f1a-41d5-b345-bee0355a62a4): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/redhat-marketplace-index@sha256=e848a00af7690cfa41500b98e0e7a0b9738ce0af7b6b4fee3ea20e0838523c30/signature-2: status 500 (Internal Server Error)" logger="UnhandledError" Feb 27 17:58:50 crc kubenswrapper[4751]: E0227 17:58:50.102936 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/redhat-marketplace-index@sha256=e848a00af7690cfa41500b98e0e7a0b9738ce0af7b6b4fee3ea20e0838523c30/signature-2: status 500 (Internal Server Error)\"" pod="openshift-marketplace/redhat-marketplace-2krgj" podUID="a0765e0f-9f1a-41d5-b345-bee0355a62a4" Feb 27 17:59:01 crc kubenswrapper[4751]: E0227 17:59:01.524496 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-2krgj" podUID="a0765e0f-9f1a-41d5-b345-bee0355a62a4" Feb 27 17:59:02 crc kubenswrapper[4751]: E0227 17:59:02.523445 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536918-lbbck" podUID="92977b18-9292-4958-a0b2-7348d83a24b5" Feb 27 17:59:13 crc kubenswrapper[4751]: E0227 17:59:13.524444 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536918-lbbck" podUID="92977b18-9292-4958-a0b2-7348d83a24b5" Feb 27 17:59:17 crc kubenswrapper[4751]: E0227 17:59:17.096923 4751 log.go:32] "PullImage from image 
service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/redhat-marketplace-index@sha256=e848a00af7690cfa41500b98e0e7a0b9738ce0af7b6b4fee3ea20e0838523c30/signature-2: status 500 (Internal Server Error)" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Feb 27 17:59:17 crc kubenswrapper[4751]: E0227 17:59:17.097558 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-x5m9h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-2krgj_openshift-marketplace(a0765e0f-9f1a-41d5-b345-bee0355a62a4): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/redhat-marketplace-index@sha256=e848a00af7690cfa41500b98e0e7a0b9738ce0af7b6b4fee3ea20e0838523c30/signature-2: status 500 (Internal Server Error)" logger="UnhandledError" Feb 27 17:59:17 crc kubenswrapper[4751]: E0227 17:59:17.098838 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/redhat-marketplace-index@sha256=e848a00af7690cfa41500b98e0e7a0b9738ce0af7b6b4fee3ea20e0838523c30/signature-2: status 500 (Internal Server Error)\"" pod="openshift-marketplace/redhat-marketplace-2krgj" podUID="a0765e0f-9f1a-41d5-b345-bee0355a62a4" Feb 27 17:59:17 crc kubenswrapper[4751]: I0227 17:59:17.766551 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-kgpwq"] Feb 27 17:59:17 crc kubenswrapper[4751]: I0227 17:59:17.770192 4751 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-kgpwq" Feb 27 17:59:17 crc kubenswrapper[4751]: I0227 17:59:17.780577 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kgpwq"] Feb 27 17:59:17 crc kubenswrapper[4751]: I0227 17:59:17.858174 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5bgb\" (UniqueName: \"kubernetes.io/projected/9e56d14e-9412-405b-b6bd-f7815e4cbe76-kube-api-access-m5bgb\") pod \"community-operators-kgpwq\" (UID: \"9e56d14e-9412-405b-b6bd-f7815e4cbe76\") " pod="openshift-marketplace/community-operators-kgpwq" Feb 27 17:59:17 crc kubenswrapper[4751]: I0227 17:59:17.858235 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e56d14e-9412-405b-b6bd-f7815e4cbe76-catalog-content\") pod \"community-operators-kgpwq\" (UID: \"9e56d14e-9412-405b-b6bd-f7815e4cbe76\") " pod="openshift-marketplace/community-operators-kgpwq" Feb 27 17:59:17 crc kubenswrapper[4751]: I0227 17:59:17.858307 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e56d14e-9412-405b-b6bd-f7815e4cbe76-utilities\") pod \"community-operators-kgpwq\" (UID: \"9e56d14e-9412-405b-b6bd-f7815e4cbe76\") " pod="openshift-marketplace/community-operators-kgpwq" Feb 27 17:59:17 crc kubenswrapper[4751]: I0227 17:59:17.960082 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e56d14e-9412-405b-b6bd-f7815e4cbe76-utilities\") pod \"community-operators-kgpwq\" (UID: \"9e56d14e-9412-405b-b6bd-f7815e4cbe76\") " pod="openshift-marketplace/community-operators-kgpwq" Feb 27 17:59:17 crc kubenswrapper[4751]: I0227 17:59:17.960242 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5bgb\" (UniqueName: \"kubernetes.io/projected/9e56d14e-9412-405b-b6bd-f7815e4cbe76-kube-api-access-m5bgb\") pod \"community-operators-kgpwq\" (UID: \"9e56d14e-9412-405b-b6bd-f7815e4cbe76\") " pod="openshift-marketplace/community-operators-kgpwq" Feb 27 17:59:17 crc kubenswrapper[4751]: I0227 17:59:17.960273 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e56d14e-9412-405b-b6bd-f7815e4cbe76-catalog-content\") pod \"community-operators-kgpwq\" (UID: \"9e56d14e-9412-405b-b6bd-f7815e4cbe76\") " pod="openshift-marketplace/community-operators-kgpwq" Feb 27 17:59:17 crc kubenswrapper[4751]: I0227 17:59:17.960856 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e56d14e-9412-405b-b6bd-f7815e4cbe76-utilities\") pod \"community-operators-kgpwq\" (UID: \"9e56d14e-9412-405b-b6bd-f7815e4cbe76\") " pod="openshift-marketplace/community-operators-kgpwq" Feb 27 17:59:17 crc kubenswrapper[4751]: I0227 17:59:17.960919 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e56d14e-9412-405b-b6bd-f7815e4cbe76-catalog-content\") pod \"community-operators-kgpwq\" (UID: \"9e56d14e-9412-405b-b6bd-f7815e4cbe76\") " pod="openshift-marketplace/community-operators-kgpwq" Feb 27 17:59:17 crc kubenswrapper[4751]: I0227 17:59:17.981630 4751 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-m5bgb\" (UniqueName: \"kubernetes.io/projected/9e56d14e-9412-405b-b6bd-f7815e4cbe76-kube-api-access-m5bgb\") pod \"community-operators-kgpwq\" (UID: \"9e56d14e-9412-405b-b6bd-f7815e4cbe76\") " pod="openshift-marketplace/community-operators-kgpwq" Feb 27 17:59:18 crc kubenswrapper[4751]: I0227 17:59:18.115720 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kgpwq" Feb 27 17:59:18 crc kubenswrapper[4751]: I0227 17:59:18.573106 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kgpwq"] Feb 27 17:59:19 crc kubenswrapper[4751]: I0227 17:59:19.112979 4751 generic.go:334] "Generic (PLEG): container finished" podID="9e56d14e-9412-405b-b6bd-f7815e4cbe76" containerID="bab7661517e88273aeb2b3b742ee92363091528cfc3d6234c0150106d48969d2" exitCode=0 Feb 27 17:59:19 crc kubenswrapper[4751]: I0227 17:59:19.113285 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kgpwq" event={"ID":"9e56d14e-9412-405b-b6bd-f7815e4cbe76","Type":"ContainerDied","Data":"bab7661517e88273aeb2b3b742ee92363091528cfc3d6234c0150106d48969d2"} Feb 27 17:59:19 crc kubenswrapper[4751]: I0227 17:59:19.113314 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kgpwq" event={"ID":"9e56d14e-9412-405b-b6bd-f7815e4cbe76","Type":"ContainerStarted","Data":"e1a5abc73f3df553b30e5281b6d7464a1d65db8d1346eb8480695ff6eedc2ed0"} Feb 27 17:59:20 crc kubenswrapper[4751]: I0227 17:59:20.122301 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kgpwq" event={"ID":"9e56d14e-9412-405b-b6bd-f7815e4cbe76","Type":"ContainerStarted","Data":"109465a2c571c6d23fab6605118ea04a6a3aaee4b3f4b8da35a63a8fe9ae2dec"} Feb 27 17:59:21 crc kubenswrapper[4751]: I0227 17:59:21.136006 4751 generic.go:334] "Generic (PLEG): container finished" podID="9e56d14e-9412-405b-b6bd-f7815e4cbe76" containerID="109465a2c571c6d23fab6605118ea04a6a3aaee4b3f4b8da35a63a8fe9ae2dec" exitCode=0 Feb 27 17:59:21 crc kubenswrapper[4751]: I0227 17:59:21.136102 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kgpwq" event={"ID":"9e56d14e-9412-405b-b6bd-f7815e4cbe76","Type":"ContainerDied","Data":"109465a2c571c6d23fab6605118ea04a6a3aaee4b3f4b8da35a63a8fe9ae2dec"} Feb 27 17:59:22 crc kubenswrapper[4751]: I0227 17:59:22.148707 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kgpwq" event={"ID":"9e56d14e-9412-405b-b6bd-f7815e4cbe76","Type":"ContainerStarted","Data":"9e85cc162bf929408e35b4502c3116c8029ad72b0929dd0e250256a11f8b9468"} Feb 27 17:59:22 crc kubenswrapper[4751]: I0227 17:59:22.179275 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-kgpwq" podStartSLOduration=2.736863462 podStartE2EDuration="5.179254884s" podCreationTimestamp="2026-02-27 17:59:17 +0000 UTC" firstStartedPulling="2026-02-27 17:59:19.11512564 +0000 UTC m=+5721.262140087" lastFinishedPulling="2026-02-27 17:59:21.557517022 +0000 UTC m=+5723.704531509" observedRunningTime="2026-02-27 17:59:22.17075515 +0000 UTC m=+5724.317769627" watchObservedRunningTime="2026-02-27 17:59:22.179254884 +0000 UTC m=+5724.326269351" Feb 27 17:59:27 crc kubenswrapper[4751]: E0227 17:59:27.524046 4751 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536918-lbbck" podUID="92977b18-9292-4958-a0b2-7348d83a24b5" Feb 27 17:59:28 crc kubenswrapper[4751]: I0227 17:59:28.116641 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-kgpwq" Feb 27 17:59:28 crc kubenswrapper[4751]: I0227 17:59:28.116714 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-kgpwq" Feb 27 17:59:28 crc kubenswrapper[4751]: I0227 17:59:28.195172 4751 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-kgpwq" Feb 27 17:59:28 crc kubenswrapper[4751]: I0227 17:59:28.281492 4751 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-kgpwq" Feb 27 17:59:28 crc kubenswrapper[4751]: I0227 17:59:28.451798 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kgpwq"] Feb 27 17:59:30 crc kubenswrapper[4751]: I0227 17:59:30.221088 4751 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-kgpwq" podUID="9e56d14e-9412-405b-b6bd-f7815e4cbe76" containerName="registry-server" containerID="cri-o://9e85cc162bf929408e35b4502c3116c8029ad72b0929dd0e250256a11f8b9468" gracePeriod=2 Feb 27 17:59:30 crc kubenswrapper[4751]: I0227 17:59:30.657563 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kgpwq" Feb 27 17:59:30 crc kubenswrapper[4751]: I0227 17:59:30.788361 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m5bgb\" (UniqueName: \"kubernetes.io/projected/9e56d14e-9412-405b-b6bd-f7815e4cbe76-kube-api-access-m5bgb\") pod \"9e56d14e-9412-405b-b6bd-f7815e4cbe76\" (UID: \"9e56d14e-9412-405b-b6bd-f7815e4cbe76\") " Feb 27 17:59:30 crc kubenswrapper[4751]: I0227 17:59:30.788951 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e56d14e-9412-405b-b6bd-f7815e4cbe76-utilities\") pod \"9e56d14e-9412-405b-b6bd-f7815e4cbe76\" (UID: \"9e56d14e-9412-405b-b6bd-f7815e4cbe76\") " Feb 27 17:59:30 crc kubenswrapper[4751]: I0227 17:59:30.789023 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e56d14e-9412-405b-b6bd-f7815e4cbe76-catalog-content\") pod \"9e56d14e-9412-405b-b6bd-f7815e4cbe76\" (UID: \"9e56d14e-9412-405b-b6bd-f7815e4cbe76\") " Feb 27 17:59:30 crc kubenswrapper[4751]: I0227 17:59:30.789953 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e56d14e-9412-405b-b6bd-f7815e4cbe76-utilities" (OuterVolumeSpecName: "utilities") pod "9e56d14e-9412-405b-b6bd-f7815e4cbe76" (UID: "9e56d14e-9412-405b-b6bd-f7815e4cbe76"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:59:30 crc kubenswrapper[4751]: I0227 17:59:30.798823 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e56d14e-9412-405b-b6bd-f7815e4cbe76-kube-api-access-m5bgb" (OuterVolumeSpecName: "kube-api-access-m5bgb") pod "9e56d14e-9412-405b-b6bd-f7815e4cbe76" (UID: "9e56d14e-9412-405b-b6bd-f7815e4cbe76"). InnerVolumeSpecName "kube-api-access-m5bgb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 17:59:30 crc kubenswrapper[4751]: I0227 17:59:30.854665 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e56d14e-9412-405b-b6bd-f7815e4cbe76-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9e56d14e-9412-405b-b6bd-f7815e4cbe76" (UID: "9e56d14e-9412-405b-b6bd-f7815e4cbe76"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 27 17:59:30 crc kubenswrapper[4751]: I0227 17:59:30.892030 4751 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e56d14e-9412-405b-b6bd-f7815e4cbe76-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 27 17:59:30 crc kubenswrapper[4751]: I0227 17:59:30.892079 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m5bgb\" (UniqueName: \"kubernetes.io/projected/9e56d14e-9412-405b-b6bd-f7815e4cbe76-kube-api-access-m5bgb\") on node \"crc\" DevicePath \"\"" Feb 27 17:59:30 crc kubenswrapper[4751]: I0227 17:59:30.892095 4751 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e56d14e-9412-405b-b6bd-f7815e4cbe76-utilities\") on node \"crc\" DevicePath \"\"" Feb 27 17:59:31 crc kubenswrapper[4751]: I0227 17:59:31.233233 4751 generic.go:334] "Generic (PLEG): container finished" podID="9e56d14e-9412-405b-b6bd-f7815e4cbe76" containerID="9e85cc162bf929408e35b4502c3116c8029ad72b0929dd0e250256a11f8b9468" exitCode=0 Feb 27 17:59:31 crc kubenswrapper[4751]: I0227 17:59:31.233307 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kgpwq" event={"ID":"9e56d14e-9412-405b-b6bd-f7815e4cbe76","Type":"ContainerDied","Data":"9e85cc162bf929408e35b4502c3116c8029ad72b0929dd0e250256a11f8b9468"} Feb 27 17:59:31 crc kubenswrapper[4751]: I0227 17:59:31.233377 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kgpwq" event={"ID":"9e56d14e-9412-405b-b6bd-f7815e4cbe76","Type":"ContainerDied","Data":"e1a5abc73f3df553b30e5281b6d7464a1d65db8d1346eb8480695ff6eedc2ed0"} Feb 27 17:59:31 crc kubenswrapper[4751]: I0227 17:59:31.233313 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-kgpwq" Feb 27 17:59:31 crc kubenswrapper[4751]: I0227 17:59:31.233462 4751 scope.go:117] "RemoveContainer" containerID="9e85cc162bf929408e35b4502c3116c8029ad72b0929dd0e250256a11f8b9468" Feb 27 17:59:31 crc kubenswrapper[4751]: I0227 17:59:31.261489 4751 scope.go:117] "RemoveContainer" containerID="109465a2c571c6d23fab6605118ea04a6a3aaee4b3f4b8da35a63a8fe9ae2dec" Feb 27 17:59:31 crc kubenswrapper[4751]: I0227 17:59:31.302384 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kgpwq"] Feb 27 17:59:31 crc kubenswrapper[4751]: I0227 17:59:31.303785 4751 scope.go:117] "RemoveContainer" containerID="bab7661517e88273aeb2b3b742ee92363091528cfc3d6234c0150106d48969d2" Feb 27 17:59:31 crc kubenswrapper[4751]: I0227 17:59:31.309346 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-kgpwq"] Feb 27 17:59:31 crc kubenswrapper[4751]: I0227 17:59:31.339873 4751 scope.go:117] "RemoveContainer" containerID="9e85cc162bf929408e35b4502c3116c8029ad72b0929dd0e250256a11f8b9468" Feb 27 17:59:31 crc kubenswrapper[4751]: E0227 17:59:31.340275 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e85cc162bf929408e35b4502c3116c8029ad72b0929dd0e250256a11f8b9468\": container with ID starting with 9e85cc162bf929408e35b4502c3116c8029ad72b0929dd0e250256a11f8b9468 not found: ID does not exist" containerID="9e85cc162bf929408e35b4502c3116c8029ad72b0929dd0e250256a11f8b9468" Feb 27 17:59:31 crc kubenswrapper[4751]: I0227 17:59:31.340322 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e85cc162bf929408e35b4502c3116c8029ad72b0929dd0e250256a11f8b9468"} err="failed to get container status \"9e85cc162bf929408e35b4502c3116c8029ad72b0929dd0e250256a11f8b9468\": rpc error: code = NotFound desc = could not find container \"9e85cc162bf929408e35b4502c3116c8029ad72b0929dd0e250256a11f8b9468\": container with ID starting with 9e85cc162bf929408e35b4502c3116c8029ad72b0929dd0e250256a11f8b9468 not found: ID does not exist" Feb 27 17:59:31 crc kubenswrapper[4751]: I0227 17:59:31.340351 4751 scope.go:117] "RemoveContainer" containerID="109465a2c571c6d23fab6605118ea04a6a3aaee4b3f4b8da35a63a8fe9ae2dec" Feb 27 17:59:31 crc kubenswrapper[4751]: E0227 17:59:31.341039 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"109465a2c571c6d23fab6605118ea04a6a3aaee4b3f4b8da35a63a8fe9ae2dec\": container with ID starting with 109465a2c571c6d23fab6605118ea04a6a3aaee4b3f4b8da35a63a8fe9ae2dec not found: ID does not exist" containerID="109465a2c571c6d23fab6605118ea04a6a3aaee4b3f4b8da35a63a8fe9ae2dec" Feb 27 17:59:31 crc kubenswrapper[4751]: I0227 17:59:31.341093 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"109465a2c571c6d23fab6605118ea04a6a3aaee4b3f4b8da35a63a8fe9ae2dec"} err="failed to get container status \"109465a2c571c6d23fab6605118ea04a6a3aaee4b3f4b8da35a63a8fe9ae2dec\": rpc error: code = NotFound desc = could not find container \"109465a2c571c6d23fab6605118ea04a6a3aaee4b3f4b8da35a63a8fe9ae2dec\": container with ID starting with 109465a2c571c6d23fab6605118ea04a6a3aaee4b3f4b8da35a63a8fe9ae2dec not found: ID does not exist" Feb 27 17:59:31 crc kubenswrapper[4751]: I0227 17:59:31.341123 4751 scope.go:117] "RemoveContainer" 
containerID="bab7661517e88273aeb2b3b742ee92363091528cfc3d6234c0150106d48969d2" Feb 27 17:59:31 crc kubenswrapper[4751]: E0227 17:59:31.341558 4751 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bab7661517e88273aeb2b3b742ee92363091528cfc3d6234c0150106d48969d2\": container with ID starting with bab7661517e88273aeb2b3b742ee92363091528cfc3d6234c0150106d48969d2 not found: ID does not exist" containerID="bab7661517e88273aeb2b3b742ee92363091528cfc3d6234c0150106d48969d2" Feb 27 17:59:31 crc kubenswrapper[4751]: I0227 17:59:31.341591 4751 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bab7661517e88273aeb2b3b742ee92363091528cfc3d6234c0150106d48969d2"} err="failed to get container status \"bab7661517e88273aeb2b3b742ee92363091528cfc3d6234c0150106d48969d2\": rpc error: code = NotFound desc = could not find container \"bab7661517e88273aeb2b3b742ee92363091528cfc3d6234c0150106d48969d2\": container with ID starting with bab7661517e88273aeb2b3b742ee92363091528cfc3d6234c0150106d48969d2 not found: ID does not exist" Feb 27 17:59:31 crc kubenswrapper[4751]: E0227 17:59:31.523229 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-2krgj" podUID="a0765e0f-9f1a-41d5-b345-bee0355a62a4" Feb 27 17:59:32 crc kubenswrapper[4751]: I0227 17:59:32.538856 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e56d14e-9412-405b-b6bd-f7815e4cbe76" path="/var/lib/kubelet/pods/9e56d14e-9412-405b-b6bd-f7815e4cbe76/volumes" Feb 27 17:59:42 crc kubenswrapper[4751]: E0227 17:59:42.526228 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-2krgj" podUID="a0765e0f-9f1a-41d5-b345-bee0355a62a4" Feb 27 17:59:53 crc kubenswrapper[4751]: E0227 17:59:53.523634 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-2krgj" podUID="a0765e0f-9f1a-41d5-b345-bee0355a62a4" Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.157614 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29536920-4qd8x"] Feb 27 18:00:00 crc kubenswrapper[4751]: E0227 18:00:00.158651 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e56d14e-9412-405b-b6bd-f7815e4cbe76" containerName="registry-server" Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.158673 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e56d14e-9412-405b-b6bd-f7815e4cbe76" containerName="registry-server" Feb 27 18:00:00 crc kubenswrapper[4751]: E0227 18:00:00.158721 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e56d14e-9412-405b-b6bd-f7815e4cbe76" containerName="extract-content" Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.158734 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e56d14e-9412-405b-b6bd-f7815e4cbe76" containerName="extract-content" Feb 27 18:00:00 crc 
kubenswrapper[4751]: E0227 18:00:00.158762 4751 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e56d14e-9412-405b-b6bd-f7815e4cbe76" containerName="extract-utilities" Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.158777 4751 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e56d14e-9412-405b-b6bd-f7815e4cbe76" containerName="extract-utilities" Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.159055 4751 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e56d14e-9412-405b-b6bd-f7815e4cbe76" containerName="registry-server" Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.160105 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536920-4qd8x" Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.166439 4751 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536920-2sf8k"] Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.167741 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536920-2sf8k" Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.171644 4751 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.171905 4751 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.183476 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536920-4qd8x"] Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.209813 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536920-2sf8k"] Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.232181 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e-secret-volume\") pod \"collect-profiles-29536920-2sf8k\" (UID: \"10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536920-2sf8k" Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.232244 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bcgs\" (UniqueName: \"kubernetes.io/projected/10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e-kube-api-access-7bcgs\") pod \"collect-profiles-29536920-2sf8k\" (UID: \"10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536920-2sf8k" Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.232328 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2q9dn\" (UniqueName: \"kubernetes.io/projected/43d9ef6c-2de4-4d32-b2be-1a9056cc622c-kube-api-access-2q9dn\") pod \"auto-csr-approver-29536920-4qd8x\" (UID: \"43d9ef6c-2de4-4d32-b2be-1a9056cc622c\") " pod="openshift-infra/auto-csr-approver-29536920-4qd8x" Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.232532 4751 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e-config-volume\") 
pod \"collect-profiles-29536920-2sf8k\" (UID: \"10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536920-2sf8k" Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.334260 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e-secret-volume\") pod \"collect-profiles-29536920-2sf8k\" (UID: \"10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536920-2sf8k" Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.334317 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bcgs\" (UniqueName: \"kubernetes.io/projected/10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e-kube-api-access-7bcgs\") pod \"collect-profiles-29536920-2sf8k\" (UID: \"10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536920-2sf8k" Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.334371 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2q9dn\" (UniqueName: \"kubernetes.io/projected/43d9ef6c-2de4-4d32-b2be-1a9056cc622c-kube-api-access-2q9dn\") pod \"auto-csr-approver-29536920-4qd8x\" (UID: \"43d9ef6c-2de4-4d32-b2be-1a9056cc622c\") " pod="openshift-infra/auto-csr-approver-29536920-4qd8x" Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.334447 4751 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e-config-volume\") pod \"collect-profiles-29536920-2sf8k\" (UID: \"10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536920-2sf8k" Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.335351 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e-config-volume\") pod \"collect-profiles-29536920-2sf8k\" (UID: \"10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536920-2sf8k" Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.343710 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e-secret-volume\") pod \"collect-profiles-29536920-2sf8k\" (UID: \"10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536920-2sf8k" Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.349944 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bcgs\" (UniqueName: \"kubernetes.io/projected/10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e-kube-api-access-7bcgs\") pod \"collect-profiles-29536920-2sf8k\" (UID: \"10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29536920-2sf8k" Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.353266 4751 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2q9dn\" (UniqueName: \"kubernetes.io/projected/43d9ef6c-2de4-4d32-b2be-1a9056cc622c-kube-api-access-2q9dn\") pod \"auto-csr-approver-29536920-4qd8x\" (UID: \"43d9ef6c-2de4-4d32-b2be-1a9056cc622c\") " pod="openshift-infra/auto-csr-approver-29536920-4qd8x" Feb 27 18:00:00 crc kubenswrapper[4751]: 
I0227 18:00:00.484724 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536920-4qd8x" Feb 27 18:00:00 crc kubenswrapper[4751]: I0227 18:00:00.494450 4751 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536920-2sf8k" Feb 27 18:00:01 crc kubenswrapper[4751]: I0227 18:00:01.014874 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29536920-4qd8x"] Feb 27 18:00:01 crc kubenswrapper[4751]: I0227 18:00:01.020241 4751 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536920-2sf8k"] Feb 27 18:00:01 crc kubenswrapper[4751]: I0227 18:00:01.517695 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536920-4qd8x" event={"ID":"43d9ef6c-2de4-4d32-b2be-1a9056cc622c","Type":"ContainerStarted","Data":"3cc205d66958d67edc56075ea8588c2efc216be9df3f7bf5551adbe7dc69515c"} Feb 27 18:00:01 crc kubenswrapper[4751]: I0227 18:00:01.519345 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536920-2sf8k" event={"ID":"10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e","Type":"ContainerStarted","Data":"447b253e9017963b507e7c4d315b41d2d6b8dd27ce90596f153f8b273d582e93"} Feb 27 18:00:01 crc kubenswrapper[4751]: I0227 18:00:01.519388 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536920-2sf8k" event={"ID":"10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e","Type":"ContainerStarted","Data":"860db6a0a058cfdb6012a67ee38eb0ddc5c881fa4d2e0dca4964ac8bce615295"} Feb 27 18:00:01 crc kubenswrapper[4751]: I0227 18:00:01.540268 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29536920-2sf8k" podStartSLOduration=1.5402425050000002 podStartE2EDuration="1.540242505s" podCreationTimestamp="2026-02-27 18:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-27 18:00:01.535744107 +0000 UTC m=+5763.682758554" watchObservedRunningTime="2026-02-27 18:00:01.540242505 +0000 UTC m=+5763.687256952" Feb 27 18:00:02 crc kubenswrapper[4751]: I0227 18:00:02.541682 4751 generic.go:334] "Generic (PLEG): container finished" podID="10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e" containerID="447b253e9017963b507e7c4d315b41d2d6b8dd27ce90596f153f8b273d582e93" exitCode=0 Feb 27 18:00:02 crc kubenswrapper[4751]: I0227 18:00:02.543097 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536920-2sf8k" event={"ID":"10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e","Type":"ContainerDied","Data":"447b253e9017963b507e7c4d315b41d2d6b8dd27ce90596f153f8b273d582e93"} Feb 27 18:00:03 crc kubenswrapper[4751]: I0227 18:00:03.861661 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536920-2sf8k" Feb 27 18:00:04 crc kubenswrapper[4751]: I0227 18:00:04.028240 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7bcgs\" (UniqueName: \"kubernetes.io/projected/10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e-kube-api-access-7bcgs\") pod \"10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e\" (UID: \"10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e\") " Feb 27 18:00:04 crc kubenswrapper[4751]: I0227 18:00:04.028335 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e-config-volume\") pod \"10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e\" (UID: \"10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e\") " Feb 27 18:00:04 crc kubenswrapper[4751]: I0227 18:00:04.028444 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e-secret-volume\") pod \"10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e\" (UID: \"10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e\") " Feb 27 18:00:04 crc kubenswrapper[4751]: I0227 18:00:04.030219 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e-config-volume" (OuterVolumeSpecName: "config-volume") pod "10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e" (UID: "10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 27 18:00:04 crc kubenswrapper[4751]: I0227 18:00:04.034380 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e-kube-api-access-7bcgs" (OuterVolumeSpecName: "kube-api-access-7bcgs") pod "10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e" (UID: "10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e"). InnerVolumeSpecName "kube-api-access-7bcgs". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 18:00:04 crc kubenswrapper[4751]: I0227 18:00:04.035726 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e" (UID: "10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 27 18:00:04 crc kubenswrapper[4751]: I0227 18:00:04.130299 4751 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 27 18:00:04 crc kubenswrapper[4751]: I0227 18:00:04.130341 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7bcgs\" (UniqueName: \"kubernetes.io/projected/10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e-kube-api-access-7bcgs\") on node \"crc\" DevicePath \"\"" Feb 27 18:00:04 crc kubenswrapper[4751]: I0227 18:00:04.130354 4751 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e-config-volume\") on node \"crc\" DevicePath \"\"" Feb 27 18:00:04 crc kubenswrapper[4751]: I0227 18:00:04.580584 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29536920-2sf8k" event={"ID":"10a4fd7c-534a-4d9e-b901-f9b1bfa74e0e","Type":"ContainerDied","Data":"860db6a0a058cfdb6012a67ee38eb0ddc5c881fa4d2e0dca4964ac8bce615295"} Feb 27 18:00:04 crc kubenswrapper[4751]: I0227 18:00:04.580647 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="860db6a0a058cfdb6012a67ee38eb0ddc5c881fa4d2e0dca4964ac8bce615295" Feb 27 18:00:04 crc kubenswrapper[4751]: I0227 18:00:04.580720 4751 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29536920-2sf8k" Feb 27 18:00:04 crc kubenswrapper[4751]: I0227 18:00:04.634119 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536875-6kwl2"] Feb 27 18:00:04 crc kubenswrapper[4751]: I0227 18:00:04.645539 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29536875-6kwl2"] Feb 27 18:00:05 crc kubenswrapper[4751]: I0227 18:00:05.589313 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536920-4qd8x" event={"ID":"43d9ef6c-2de4-4d32-b2be-1a9056cc622c","Type":"ContainerStarted","Data":"0c5da920ea94181936df22aa0fda534a063f0870dfebae38aa475b84ab4d569b"} Feb 27 18:00:05 crc kubenswrapper[4751]: I0227 18:00:05.622038 4751 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-infra/auto-csr-approver-29536920-4qd8x" podStartSLOduration=1.463312328 podStartE2EDuration="5.622006771s" podCreationTimestamp="2026-02-27 18:00:00 +0000 UTC" firstStartedPulling="2026-02-27 18:00:01.025918614 +0000 UTC m=+5763.172933061" lastFinishedPulling="2026-02-27 18:00:05.184613037 +0000 UTC m=+5767.331627504" observedRunningTime="2026-02-27 18:00:05.607465858 +0000 UTC m=+5767.754480345" watchObservedRunningTime="2026-02-27 18:00:05.622006771 +0000 UTC m=+5767.769021228" Feb 27 18:00:06 crc kubenswrapper[4751]: E0227 18:00:06.214200 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/redhat-marketplace-index@sha256=e848a00af7690cfa41500b98e0e7a0b9738ce0af7b6b4fee3ea20e0838523c30/signature-2: status 500 (Internal Server Error)" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Feb 27 18:00:06 crc kubenswrapper[4751]: E0227 
18:00:06.214757 4751 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-x5m9h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-2krgj_openshift-marketplace(a0765e0f-9f1a-41d5-b345-bee0355a62a4): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/redhat-marketplace-index@sha256=e848a00af7690cfa41500b98e0e7a0b9738ce0af7b6b4fee3ea20e0838523c30/signature-2: status 500 (Internal Server Error)" logger="UnhandledError" Feb 27 18:00:06 crc kubenswrapper[4751]: E0227 18:00:06.216369 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/redhat/redhat-marketplace-index@sha256=e848a00af7690cfa41500b98e0e7a0b9738ce0af7b6b4fee3ea20e0838523c30/signature-2: status 500 (Internal Server Error)\"" pod="openshift-marketplace/redhat-marketplace-2krgj" podUID="a0765e0f-9f1a-41d5-b345-bee0355a62a4" Feb 27 18:00:06 crc kubenswrapper[4751]: I0227 18:00:06.536941 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b00aca05-48fe-45cb-a464-56fe49532233" path="/var/lib/kubelet/pods/b00aca05-48fe-45cb-a464-56fe49532233/volumes" Feb 27 18:00:06 crc kubenswrapper[4751]: I0227 18:00:06.604249 4751 generic.go:334] "Generic (PLEG): container finished" podID="43d9ef6c-2de4-4d32-b2be-1a9056cc622c" containerID="0c5da920ea94181936df22aa0fda534a063f0870dfebae38aa475b84ab4d569b" exitCode=0 Feb 27 18:00:06 crc kubenswrapper[4751]: I0227 18:00:06.604322 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536920-4qd8x" event={"ID":"43d9ef6c-2de4-4d32-b2be-1a9056cc622c","Type":"ContainerDied","Data":"0c5da920ea94181936df22aa0fda534a063f0870dfebae38aa475b84ab4d569b"} Feb 27 18:00:07 crc kubenswrapper[4751]: I0227 18:00:07.963893 4751 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29536920-4qd8x" Feb 27 18:00:08 crc kubenswrapper[4751]: I0227 18:00:08.093777 4751 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2q9dn\" (UniqueName: \"kubernetes.io/projected/43d9ef6c-2de4-4d32-b2be-1a9056cc622c-kube-api-access-2q9dn\") pod \"43d9ef6c-2de4-4d32-b2be-1a9056cc622c\" (UID: \"43d9ef6c-2de4-4d32-b2be-1a9056cc622c\") " Feb 27 18:00:08 crc kubenswrapper[4751]: I0227 18:00:08.105734 4751 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43d9ef6c-2de4-4d32-b2be-1a9056cc622c-kube-api-access-2q9dn" (OuterVolumeSpecName: "kube-api-access-2q9dn") pod "43d9ef6c-2de4-4d32-b2be-1a9056cc622c" (UID: "43d9ef6c-2de4-4d32-b2be-1a9056cc622c"). InnerVolumeSpecName "kube-api-access-2q9dn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 27 18:00:08 crc kubenswrapper[4751]: I0227 18:00:08.198179 4751 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2q9dn\" (UniqueName: \"kubernetes.io/projected/43d9ef6c-2de4-4d32-b2be-1a9056cc622c-kube-api-access-2q9dn\") on node \"crc\" DevicePath \"\"" Feb 27 18:00:08 crc kubenswrapper[4751]: I0227 18:00:08.625066 4751 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29536920-4qd8x" event={"ID":"43d9ef6c-2de4-4d32-b2be-1a9056cc622c","Type":"ContainerDied","Data":"3cc205d66958d67edc56075ea8588c2efc216be9df3f7bf5551adbe7dc69515c"} Feb 27 18:00:08 crc kubenswrapper[4751]: I0227 18:00:08.625125 4751 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3cc205d66958d67edc56075ea8588c2efc216be9df3f7bf5551adbe7dc69515c" Feb 27 18:00:08 crc kubenswrapper[4751]: I0227 18:00:08.625237 4751 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29536920-4qd8x" Feb 27 18:00:08 crc kubenswrapper[4751]: I0227 18:00:08.680387 4751 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29536912-mbp9n"] Feb 27 18:00:08 crc kubenswrapper[4751]: I0227 18:00:08.689091 4751 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29536912-mbp9n"] Feb 27 18:00:10 crc kubenswrapper[4751]: I0227 18:00:10.538669 4751 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca69b365-fbc1-4665-9b8b-86ab580acc22" path="/var/lib/kubelet/pods/ca69b365-fbc1-4665-9b8b-86ab580acc22/volumes" Feb 27 18:00:18 crc kubenswrapper[4751]: E0227 18:00:18.528777 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-2krgj" podUID="a0765e0f-9f1a-41d5-b345-bee0355a62a4" Feb 27 18:00:25 crc kubenswrapper[4751]: E0227 18:00:25.324226 4751 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)" image="registry.redhat.io/openshift4/ose-cli:latest" Feb 27 18:00:25 crc kubenswrapper[4751]: E0227 18:00:25.324958 4751 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 27 18:00:25 crc kubenswrapper[4751]: container &Container{Name:oc,Image:registry.redhat.io/openshift4/ose-cli:latest,Command:[/bin/bash -c oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve Feb 27 18:00:25 crc kubenswrapper[4751]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5wrfb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod auto-csr-approver-29536918-lbbck_openshift-infra(92977b18-9292-4958-a0b2-7348d83a24b5): ErrImagePull: copying system image from manifest list: reading signatures: reading signature from https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error) Feb 27 18:00:25 crc kubenswrapper[4751]: > logger="UnhandledError" Feb 27 18:00:25 crc kubenswrapper[4751]: E0227 18:00:25.326626 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ErrImagePull: \"copying system image from manifest list: reading signatures: reading signature from 
https://registry.redhat.io/containers/sigstore/openshift4/ose-cli@sha256=69762925e16053d77685ff3a08b3b45dd2bfa5d68277851bc6969b368bbd0cb9/signature-7: status 500 (Internal Server Error)\"" pod="openshift-infra/auto-csr-approver-29536918-lbbck" podUID="92977b18-9292-4958-a0b2-7348d83a24b5" Feb 27 18:00:29 crc kubenswrapper[4751]: E0227 18:00:29.524754 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-2krgj" podUID="a0765e0f-9f1a-41d5-b345-bee0355a62a4" Feb 27 18:00:40 crc kubenswrapper[4751]: E0227 18:00:40.525104 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536918-lbbck" podUID="92977b18-9292-4958-a0b2-7348d83a24b5" Feb 27 18:00:44 crc kubenswrapper[4751]: E0227 18:00:44.523836 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-2krgj" podUID="a0765e0f-9f1a-41d5-b345-bee0355a62a4" Feb 27 18:00:51 crc kubenswrapper[4751]: E0227 18:00:51.522034 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536918-lbbck" podUID="92977b18-9292-4958-a0b2-7348d83a24b5" Feb 27 18:00:53 crc kubenswrapper[4751]: I0227 18:00:53.378724 4751 scope.go:117] "RemoveContainer" containerID="7a2e2de4029336a2d8820b35c6e297f4713df17516ddaf9d023d6e6b6f38a4a3" Feb 27 18:00:53 crc kubenswrapper[4751]: I0227 18:00:53.414283 4751 scope.go:117] "RemoveContainer" containerID="0afe0469171a8b3a2c628bba9cca2b507ab62f3b75116c98814309cd6103dd5e" Feb 27 18:00:56 crc kubenswrapper[4751]: E0227 18:00:56.523965 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-2krgj" podUID="a0765e0f-9f1a-41d5-b345-bee0355a62a4" Feb 27 18:00:58 crc kubenswrapper[4751]: I0227 18:00:58.918950 4751 patch_prober.go:28] interesting pod/machine-config-daemon-rkcdq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 27 18:00:58 crc kubenswrapper[4751]: I0227 18:00:58.919304 4751 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rkcdq" podUID="d2dc62fb-3001-4aaa-a4d7-e4fb8f22157f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 27 18:01:03 crc kubenswrapper[4751]: E0227 18:01:03.524549 4751 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oc\" with ImagePullBackOff: \"Back-off pulling image 
\\\"registry.redhat.io/openshift4/ose-cli:latest\\\"\"" pod="openshift-infra/auto-csr-approver-29536918-lbbck" podUID="92977b18-9292-4958-a0b2-7348d83a24b5" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515150355755024460 0ustar coreroot  Om77'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015150355756017376 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015150342035016503 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015150342035015453 5ustar corecore